2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/gfp.h>
20 #include <linux/slab.h>
21 #include <linux/blkdev.h>
23 #include "transaction.h"
24 #include "btrfs_inode.h"
30 struct rb_node rb_node;
/*
 * Key comparator for the ordered-inode rb-tree: entries are ordered
 * first by root_objectid, then by objectid.
 *
 * NOTE(review): this chunk is missing lines — the return statements and
 * the closing brace of comp_entry are not visible here.  Presumably each
 * visible "if" returns -1/1 per the comment below, with 0 for equality;
 * confirm against the full file.
 */
34 * returns > 0 if entry passed (root, objectid) is > entry,
35 * < 0 if (root, objectid) < entry and zero if they are equal
37 static int comp_entry(struct tree_entry *entry, u64 root_objectid,
/* compare the root ids first; they are the major key */
40 if (root_objectid < entry->root_objectid)
42 if (root_objectid > entry->root_objectid)
/* roots equal: fall through to the inode objectid as the minor key */
44 if (objectid < entry->objectid)
46 if (objectid > entry->objectid)
/*
 * Insert @node into the rb-tree @root at the position determined by
 * (root_objectid, objectid).
 *
 * NOTE(review): interior lines are missing from this chunk — the descent
 * loop header, the branch on comp's sign, and the return statement(s) are
 * not visible.  Presumably the function returns NULL on success and the
 * existing rb_node when the key is already present; confirm against the
 * full file before relying on that.
 */
51 static struct rb_node *tree_insert(struct rb_root *root, u64 root_objectid,
52 u64 objectid, struct rb_node *node)
54 struct rb_node ** p = &root->rb_node;
55 struct rb_node * parent = NULL;
56 struct tree_entry *entry;
/* descent: map the current rb_node back to its containing tree_entry */
61 entry = rb_entry(parent, struct tree_entry, rb_node);
63 comp = comp_entry(entry, root_objectid, objectid);
/* link the new node at the leaf slot found above, then rebalance */
72 rb_link_node(node, parent, p);
73 rb_insert_color(node, root);
/*
 * Search the rb-tree for the entry keyed (root_objectid, objectid).
 *
 * NOTE(review): lines are missing from this chunk — the descent loop, the
 * exact-match return, and the handling of @prev_ret are only partially
 * visible.  From what is shown, on a miss the function advances @prev with
 * rb_next() until it reaches an entry that compares below the search key,
 * presumably so *prev_ret points at the first entry greater than the key;
 * confirm against the full file.  The visible loop dereferences prev_entry
 * before re-testing prev — the missing lines presumably re-derive
 * prev_entry safely; cannot verify from here.
 */
77 static struct rb_node *__tree_search(struct rb_root *root, u64 root_objectid,
78 u64 objectid, struct rb_node **prev_ret)
80 struct rb_node * n = root->rb_node;
81 struct rb_node *prev = NULL;
82 struct tree_entry *entry;
83 struct tree_entry *prev_entry = NULL;
/* descent: map the current node to its tree_entry and compare keys */
87 entry = rb_entry(n, struct tree_entry, rb_node);
90 comp = comp_entry(entry, root_objectid, objectid);
/* walk forward past entries that compare >= the search key */
102 while(prev && comp_entry(prev_entry, root_objectid, objectid) >= 0) {
103 prev = rb_next(prev);
104 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
/*
 * Convenience wrapper around __tree_search() for callers that do not
 * need the exact-match result distinguished from the successor.
 *
 * NOTE(review): the lines between the call and the (missing) return are
 * not visible in this chunk; presumably it falls back to @prev when the
 * exact key is absent — confirm against the full file.
 */
110 static inline struct rb_node *tree_search(struct rb_root *root,
111 u64 root_objectid, u64 objectid)
113 struct rb_node *prev;
115 ret = __tree_search(root, root_objectid, objectid, &prev);
/*
 * Record @inode in the running transaction's ordered-inode tree so the
 * transaction commit can later find and flush it.
 *
 * NOTE(review): several lines are missing from this chunk — the early
 * returns, the kmalloc failure check, and the handling of a non-NULL
 * result from the read-locked lookup and from tree_insert() are not
 * visible.  Comments below describe only the visible statements.
 */
121 int btrfs_add_ordered_inode(struct inode *inode)
123 struct btrfs_root *root = BTRFS_I(inode)->root;
124 u64 root_objectid = root->root_key.objectid;
125 u64 transid = root->fs_info->running_transaction->transid;
126 struct tree_entry *entry;
127 struct rb_node *node;
128 struct btrfs_ordered_inode_tree *tree;
/* already recorded during this transaction: nothing to do (early-out) */
130 if (transid <= BTRFS_I(inode)->ordered_trans)
133 tree = &root->fs_info->running_transaction->ordered_inode_tree;
/*
 * cheap read-locked probe first; NOTE(review): the lock is dropped
 * before the write-locked insert below, so the missing lines presumably
 * re-check for a concurrent insert (tree_insert returning non-NULL) —
 * confirm against the full file.
 */
135 read_lock(&tree->lock);
136 node = __tree_search(&tree->tree, root_objectid, inode->i_ino, NULL);
137 read_unlock(&tree->lock);
/* GFP_NOFS: this runs in the write path, must not recurse into the fs */
142 entry = kmalloc(sizeof(*entry), GFP_NOFS);
146 write_lock(&tree->lock);
147 entry->objectid = inode->i_ino;
148 entry->root_objectid = root_objectid;
149 entry->inode = inode;
151 node = tree_insert(&tree->tree, root_objectid,
152 inode->i_ino, &entry->rb_node);
/* remember which transaction this inode was recorded in */
154 BTRFS_I(inode)->ordered_trans = transid;
158 write_unlock(&tree->lock);
/*
 * Find the first ordered-inode entry strictly after the cursor
 * (*root_objectid, *objectid) and return it through the out parameters,
 * taking an extra i_count reference on the inode so the caller can use
 * it after the lock is dropped.
 *
 * NOTE(review): lines are missing from this chunk — the not-found
 * returns and the success return value are not visible.  Presumably the
 * function returns 0/1 to distinguish "nothing found" from "found";
 * confirm against the full file.
 */
165 int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree,
166 u64 *root_objectid, u64 *objectid,
167 struct inode **inode)
169 struct tree_entry *entry;
170 struct rb_node *node;
172 write_lock(&tree->lock);
173 node = tree_search(&tree->tree, *root_objectid, *objectid);
/* empty tree / nothing at or after the cursor: unlock and bail */
175 write_unlock(&tree->lock);
178 entry = rb_entry(node, struct tree_entry, rb_node);
/* skip entries <= the cursor so repeated calls make forward progress */
180 while(comp_entry(entry, *root_objectid, *objectid) >= 0) {
181 node = rb_next(node);
184 entry = rb_entry(node, struct tree_entry, rb_node);
187 write_unlock(&tree->lock);
/* pin the inode (raw i_count bump, pre-igrab style) and report the key */
191 *root_objectid = entry->root_objectid;
192 *inode = entry->inode;
193 atomic_inc(&entry->inode->i_count);
194 *objectid = entry->objectid;
195 write_unlock(&tree->lock);
/*
 * Same cursor walk as btrfs_find_first_ordered_inode(), but additionally
 * removes the found entry from the tree (rb_erase) before returning it —
 * the caller consumes the entry rather than just peeking at it.
 *
 * NOTE(review): lines are missing from this chunk — the not-found
 * returns, the success return value, and the freeing of the erased
 * tree_entry are not visible; confirm ownership of the erased entry
 * against the full file.
 */
199 int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree,
200 u64 *root_objectid, u64 *objectid,
201 struct inode **inode)
203 struct tree_entry *entry;
204 struct rb_node *node;
206 write_lock(&tree->lock);
207 node = tree_search(&tree->tree, *root_objectid, *objectid);
/* nothing at or after the cursor: unlock and bail */
209 write_unlock(&tree->lock);
213 entry = rb_entry(node, struct tree_entry, rb_node);
/* skip entries <= the cursor so repeated calls make forward progress */
214 while(comp_entry(entry, *root_objectid, *objectid) >= 0) {
215 node = rb_next(node);
218 entry = rb_entry(node, struct tree_entry, rb_node);
221 write_unlock(&tree->lock);
/* report the key, pin the inode, and detach the entry from the tree */
225 *root_objectid = entry->root_objectid;
226 *objectid = entry->objectid;
227 *inode = entry->inode;
228 atomic_inc(&entry->inode->i_count);
229 rb_erase(node, &tree->tree);
230 write_unlock(&tree->lock);
/*
 * Remove the exact (root_objectid, objectid) entry for @inode from the
 * ordered-inode tree, clear the inode's ordered_trans marker, and drop
 * the i_count reference taken when the entry was added.
 *
 * NOTE(review): lines are missing from this chunk — the not-found early
 * return and the freeing of the erased tree_entry (entry is computed on
 * the last visible line but its use is not shown) are not visible;
 * confirm against the full file.  Also note atomic_dec on i_count rather
 * than iput() — presumably intentional in this era of the code.
 */
235 static void __btrfs_del_ordered_inode(struct btrfs_ordered_inode_tree *tree,
237 u64 root_objectid, u64 objectid)
239 struct tree_entry *entry;
240 struct rb_node *node;
241 struct rb_node *prev;
243 write_lock(&tree->lock);
244 node = __tree_search(&tree->tree, root_objectid, objectid, &prev);
/* exact key not present: unlock and return */
246 write_unlock(&tree->lock);
249 rb_erase(node, &tree->tree);
/* inode is no longer tracked by any transaction */
250 BTRFS_I(inode)->ordered_trans = 0;
251 write_unlock(&tree->lock);
252 atomic_dec(&inode->i_count);
253 entry = rb_entry(node, struct tree_entry, rb_node);
/*
 * Public wrapper: drop @inode from the running transaction's
 * ordered-inode tree.  Unless @force is set, the inode is kept in the
 * tree while it still has dirty or writeback pages, so the transaction
 * commit will still flush it.
 *
 * NOTE(review): lines are missing from this chunk — the early-return
 * bodies and the closing braces are not visible.  new_trans_lock guards
 * the running_transaction pointer itself while the tree's own rwlock is
 * taken inside __btrfs_del_ordered_inode().
 */
258 void btrfs_del_ordered_inode(struct inode *inode, int force)
260 struct btrfs_root *root = BTRFS_I(inode)->root;
261 u64 root_objectid = root->root_key.objectid;
/* never recorded (or already cleared): nothing to delete */
263 if (!BTRFS_I(inode)->ordered_trans) {
/* still has pending pagecache work — leave it tracked unless forced */
267 if (!force && (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) ||
268 mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
271 spin_lock(&root->fs_info->new_trans_lock);
272 if (root->fs_info->running_transaction) {
273 struct btrfs_ordered_inode_tree *tree;
274 tree = &root->fs_info->running_transaction->ordered_inode_tree;
275 __btrfs_del_ordered_inode(tree, inode, root_objectid,
278 spin_unlock(&root->fs_info->new_trans_lock);
281 int btrfs_ordered_throttle(struct btrfs_root *root, struct inode *inode)
283 struct btrfs_transaction *cur = root->fs_info->running_transaction;
284 while(cur == root->fs_info->running_transaction &&
285 atomic_read(&BTRFS_I(inode)->ordered_writeback)) {
286 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
287 congestion_wait(WRITE, HZ/20);
289 blk_congestion_wait(WRITE, HZ/20);