/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

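/*
 * A worked example of the predicate above, with illustrative values: two
 * delayed dir index items in the same directory (objectid 257) whose
 * offsets (the dir indexes) are adjacent count as continuous, so they can
 * later be batched into a single leaf operation:
 *
 *	item1->key = (257, BTRFS_DIR_INDEX_KEY, 100)
 *	item2->key = (257, BTRFS_DIR_INDEX_KEY, 101)
 *		=> btrfs_is_continuous_delayed_item(item1, item2) == 1
 *
 *	item2->key = (257, BTRFS_DIR_INDEX_KEY, 102)
 *		=> 0, the offsets are not adjacent
 */
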
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		/* can be accessed and cached in the inode */
		atomic_add(2, &node->refs);
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	atomic_add(2, &node->refs);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

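/*
 * A minimal usage sketch (hypothetical caller, simplified): every lookup
 * above takes a reference, so each btrfs_get_delayed_node() or
 * btrfs_get_or_create_delayed_node() call must be paired with a
 * btrfs_release_delayed_node():
 *
 *	struct btrfs_delayed_node *node;
 *
 *	node = btrfs_get_or_create_delayed_node(inode);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *	mutex_lock(&node->mutex);
 *	... queue insertion/deletion items on the node ...
 *	mutex_unlock(&node->mutex);
 *	btrfs_release_delayed_node(node);
 *
 * The atomic_add(2, ...) above accounts for the reference cached in the
 * btrfs inode plus the reference handed back to the caller.
 */
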
/*
 * Call this while holding delayed_node->mutex.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call this while holding delayed_node->mutex. */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		bool free = false;
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			free = true;
		}
		spin_unlock(&root->inode_lock);
		if (free)
			kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up a delayed item by key
 * @root: the rb-tree (ins_root or del_root of a delayed node) to search
 * @key:  the key to look up
 * @prev: used to store the prev item if the right item isn't found
 * @next: used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

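/*
 * A sketch of the prev/next semantics above, with illustrative keys: if the
 * tree holds items at offsets { 10, 20, 30 } and we look up offset 25, no
 * exact match exists, so the function returns NULL and fills in:
 *
 *	*prev = the item at offset 20	(greatest item below the key)
 *	*next = the item at offset 30	(smallest item above the key)
 *
 * Either pointer may be set to NULL when no such neighbour exists, and
 * callers that only want an exact match pass prev == next == NULL.
 */
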
static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

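/*
 * Illustrative arithmetic for the wakeup condition above: with
 * BTRFS_DELAYED_BATCH == 16, waiters are woken either when the item count
 * drops below BTRFS_DELAYED_BACKGROUND (128) or on every 16th completion
 * (seq % 16 == 0). So a task throttled in btrfs_balance_delayed_items()
 * gets a chance to recheck could_end_wait() at least once per batch of
 * completed items instead of on every single completion.
 */
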
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
					struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * If our block_rsv is the delalloc block reserve then check and see if
	 * we have our extra reservation for updating the inode.  If not fall
	 * through and try to reserve space quickly.
	 *
	 * We used to try and steal from the delalloc block rsv or the global
	 * reserve, but we'd steal a full reservation, which isn't kind.  We are
	 * here through delalloc which means we've likely just cowed down close
	 * to the leaf that contains the inode, so we would steal less just
	 * doing the fallback inode update, so if we do end up having to steal
	 * from the global block rsv we hopefully only steal one or two blocks
	 * worth which is less likely to hurt us.
	 */
	if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &BTRFS_I(inode)->runtime_flags))
			release = true;
		else
			src_rsv = NULL;
		spin_unlock(&BTRFS_I(inode)->lock);
	}

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're already accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);

	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have things
	 * migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
	 * but we could in fact do this reserve/migrate dance several times
	 * between the time we did the original reservation and we'd clean it
	 * up.  So to take care of this, release the space for the meta
	 * reservation here.  I think it may be time for a documentation page on
	 * how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, but that might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(fs_info, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

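/*
 * Illustrative sizing for the batching loop above: each inserted item
 * consumes data_len bytes of item data plus sizeof(struct btrfs_item) of
 * header space in the leaf. As a rough, purely illustrative example,
 * assuming ~15K of free space in a 16K leaf and dir index items of ~40
 * bytes of data each, on the order of 15000 / (40 + 25) = ~230 continuous
 * items could be placed in one batch, i.e. one setup_items_for_insert()
 * call instead of hundreds of separate tree searches.
 */
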
/*
 * This helper handles the simple insertions that don't need to extend an
 * existing item with new data, such as directory name index insertion and
 * inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(fs_info, delayed_item);
	return 0;
}

/*
 * we insert one item first, then if there are continuous items following
 * it, we try to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(fs_info, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * we can't find the item that this delayed item points to,
		 * so the item is stale; just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is done only for an inode that has a single
	 * link, so there is only one iref. The case of several irefs in the
	 * same item does not occur.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	return __btrfs_run_delayed_items(trans, fs_info, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info, int nr)
{
	return __btrfs_run_delayed_items(trans, fs_info, nr);
}

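/*
 * A minimal sketch of how the two wrappers above are meant to be used
 * (hypothetical callers, simplified): the transaction commit path flushes
 * everything, while throttling paths flush a bounded number of nodes:
 *
 *	ret = btrfs_run_delayed_items(trans, fs_info);	      // nr = -1
 *	ret = btrfs_run_delayed_items_nr(trans, fs_info, 16); // bounded
 *
 * With nr < 0, "count" in __btrfs_run_delayed_items() is false and the
 * loop walks the whole node_list; with nr > 0 it stops after nr nodes.
 */
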
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty_nodelay(root->fs_info);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
	    total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

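/*
 * Illustrative behaviour of the thresholds above: below 128 items
 * (BTRFS_DELAYED_BACKGROUND) this is a no-op; from 128 upwards an async
 * worker is queued to flush one BTRFS_DELAYED_BATCH (16) worth of delayed
 * nodes; at 512 or more (BTRFS_DELAYED_WRITEBACK) the worker is queued
 * with nr == 0 (run until the backlog drains) and the caller additionally
 * blocks until could_end_wait() reports progress.
 */
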
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   const char *name, int name_len,
				   struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
	/*
	 * we reserved enough space when we started the transaction, so a
	 * metadata reservation failure here is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

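/*
 * A minimal sketch of a caller (hypothetical, simplified): when a new
 * directory entry is created, its dir index item is not written into the
 * b-tree immediately but queued here from the dir item insertion path:
 *
 *	ret = btrfs_insert_delayed_dir_index(trans, fs_info, name, name_len,
 *					     dir, &disk_key, type, index);
 *
 * The item is materialized later by btrfs_insert_delayed_items(), ideally
 * batched together with neighbouring indexes into a single leaf.
 */
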
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(fs_info, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
	/*
	 * we reserved enough space when we started the transaction, so a
	 * metadata reservation failure here is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we hold the i_mutex of this directory, no new directory index
	 * can be added to the delayed node and index_cnt cannot be updated
	 * under us, so we don't need to lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so its
	 * refcount must be > 1 here and we needn't check whether it is
	 * about to be freed.
	 *
	 * Besides, this function is only used for readdir; we don't
	 * insert/delete delayed items during this period, so we needn't
	 * requeue or dequeue this delayed node either.
	 */
	atomic_dec(&delayed_node->refs);

	return true;
}

void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
	}
	return 0;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of a delayed item is impossible, so we needn't
	 * lock them. And we hold the i_mutex of the directory, so nobody
	 * can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}

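/*
 * A sketch of how readdir consumes the helpers above (hypothetical,
 * simplified): the caller first snapshots the delayed items, emits the
 * on-disk entries while filtering pending deletions, then emits the
 * not-yet-committed insertions:
 *
 *	btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
 *	... for each on-disk index: skip the entry if
 *	    btrfs_should_delete_dir_index(&del_list, index) ...
 *	btrfs_readdir_delayed_dir_index(ctx, &ins_list);
 *	btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
 */
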
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode->i_version = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

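/*
 * A minimal sketch of the caller side (hypothetical, simplified): inode
 * updates funnel through here so that delayed_node->inode_item absorbs
 * repeated update calls, and only the final state is written back by
 * __btrfs_update_delayed_inode(). A caller along the lines of
 * btrfs_update_inode() might do:
 *
 *	ret = btrfs_delayed_update_inode(trans, root, inode);
 *	if (ret)
 *		ret = btrfs_update_inode_item(trans, root, inode);
 *
 * where the second call is the slow, direct fallback for when the delayed
 * path could not reserve metadata space.
 */
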
int btrfs_delayed_delete_inode_ref(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems.  This means we also can't do
	 * delayed inode refs.
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for the inode ref deletion because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   And in most cases, the inode ref and the inode item are in the
	 *   same leaf, so we deal with them at the same time.
	 *   Since we are sure we will reserve space for the inode item,
	 *   it is unnecessary to reserve space for the inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we still needn't worry about the enospc problem, because we
	 *   reserve much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}