/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"

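/*
 * Tunables for the delayed-item machinery: once more than
 * BTRFS_DELAYED_BACKGROUND items are pending, background flushing is
 * kicked off; at BTRFS_DELAYED_WRITEBACK the task creating new items is
 * made to wait; work is dispatched in units of BTRFS_DELAYED_BATCH items.
 */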
#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	if (delayed_node_cache)
		kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->count = 0;
	delayed_node->flags = 0;
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	delayed_node->index_cnt = 0;
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
	delayed_node->bytes_reserved = 0;
	memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
						struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}

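/*
 * Look up the delayed node cached in the btrfs inode, or in the root's
 * radix tree, taking a reference on it. Returns NULL if the inode has no
 * delayed node yet.
 */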
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		/* can be accessed and cached in the inode */
		atomic_add(2, &node->refs);
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	atomic_add(2, &node->refs);

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		kmem_cache_free(delayed_node_cache, node);
		spin_unlock(&root->inode_lock);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			kmem_cache_free(delayed_node_cache, delayed_node);
		}
		spin_unlock(&root->inode_lock);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

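/*
 * Pop the first node off the prepare list, taking a reference on it.
 * Returns NULL if the list is empty.
 */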
static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root:	the rb-root of the delayed node to search
 * @key:	the key to look up
 * @prev:	used to store the prev item if the right item isn't found
 * @next:	used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}

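/*
 * Insert a delayed item into the insertion or deletion rb-tree of the
 * delayed node, ordered by its key. Also bumps index_cnt past freshly
 * inserted dir index keys so future index allocations don't collide.
 */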
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

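/*
 * Account one finished item: bump the sequence counter and wake up any
 * waiters once the item count drops below the background threshold or a
 * whole batch has completed.
 */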
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
					struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

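/*
 * Reserve metadata space for a delayed item by migrating the reservation
 * from the transaction's block rsv into the global delayed_block_rsv, so
 * the space is still held after the transaction handle is released.
 */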
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space, for speed. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space here.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're already accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(root->fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	} else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &BTRFS_I(inode)->runtime_flags)) {
			spin_unlock(&BTRFS_I(inode)->lock);
			release = true;
			goto migrate;
		}
		spin_unlock(&BTRFS_I(inode)->lock);

		/* Ok we didn't have space pre-reserved.  This shouldn't happen
		 * too often but it can happen if we do delalloc to an existing
		 * inode which gets dirtied because of the time update, and then
		 * isn't touched again until after the transaction commits and
		 * then we try to write out the data.  First try to be nice and
		 * reserve something strictly for us.  If not be a pain and try
		 * to steal from the delalloc block rsv.
		 */
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			goto out;

		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
		if (!WARN_ON(ret))
			goto out;

		/*
		 * Ok this is a problem, let's just steal from the global rsv
		 * since this really shouldn't happen that often.
		 */
		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
					      dst_rsv, num_bytes);
		goto out;
	}

migrate:
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);

out:
	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have things
	 * migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
	 * but we could in fact do this reserve/migrate dance several times
	 * between the time we did the original reservation and we'd clean it
	 * up.  So to take care of this, release the space for the meta
	 * reservation here.  I think it may be time for a documentation page on
	 * how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(root, src_rsv, num_bytes);
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper can just do simple insertion that needn't extend item for new
 * data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

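/*
 * Delete the delayed dir index items that sit next to each other in the
 * leaf the path points to, in one tree operation.
 */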
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

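/*
 * Write the in-memory copy of the inode item back into the fs tree and,
 * if a delayed iref deletion is pending, delete the inode ref/extref item
 * that follows it. Expects delayed_node->mutex to be held.
 */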
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode that has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, root, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	return __btrfs_run_delayed_items(trans, root, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, int nr)
{
	return __btrfs_run_delayed_items(trans, root, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans, delayed_node->root);
	btrfs_btree_balance_dirty(delayed_node->root);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

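/*
 * Asynchronous flushing: a worker pulls nodes off the prepare list and
 * commits their delayed items inside a joined transaction.
 */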
struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty_nodelay(root);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if (async_work->nr == 0 || total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root,
			NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(root->fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;

	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

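/*
 * Throttle the creation of new delayed items: kick off background work
 * when there is a backlog, and above the writeback threshold make the
 * caller wait until a batch has been flushed.
 */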
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, root, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(root->fs_info,
			  "error adding delayed dir index item (name: %.*s) into the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

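/*
 * If a delayed insertion for the same index is still queued, the two
 * operations cancel out and we are done; otherwise queue a delayed
 * deletion item for the dir index.
 */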
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(root->fs_info,
			  "error adding delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id,
			  ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we hold the i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is going to be
	 * freed or not.
	 *
	 * Besides that, this function is used for readdir; we do not
	 * insert or delete delayed items during that period. So we also
	 * needn't requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);
}

void btrfs_put_delayed_items(struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
	}
	return 0;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
				      inode->i_ctime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	inode->i_version = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

int btrfs_delayed_delete_inode_ref(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for the inode ref deletion because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   And in most cases, the inode ref and the inode item are in the
	 *   same leaf, so we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for the inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about an ENOSPC problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

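/*
 * Drop all delayed items of a node without writing anything back, used
 * when the inode or the whole root is being torn down.
 */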
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(root, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}