/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>

#include "ctree.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "transaction.h"
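
/*
 * Scan the inode items in the commit root and record the gaps between
 * allocated inode numbers in the free-ino cache, so those numbers can
 * be handed out again. Runs in a kthread spawned by start_caching().
 */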
static int caching_kthread(void *data)
{
	struct btrfs_root *root = data;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u64 last = (u64)-1;
	int slot;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Since the commit root is read-only, we can safely skip locking. */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 2;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_INODE_ITEM_KEY;
again:
	/* need to make sure the commit_root doesn't disappear */
	mutex_lock(&root->fs_commit_mutex);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		smp_mb();
		if (fs_info->closing)
			goto out;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;

			if (need_resched() ||
			    btrfs_transaction_in_commit(fs_info)) {
				leaf = path->nodes[0];

				if (btrfs_header_nritems(leaf) == 0) {
					WARN_ON(1);
					break;
				}

				/*
				 * Save the key so we can advance forward
				 * in the next search.
				 */
				btrfs_item_key_to_cpu(leaf, &key, 0);
				btrfs_release_path(path);
				root->cache_progress = last;
				mutex_unlock(&root->fs_commit_mutex);
				schedule_timeout(1);
				goto again;
			} else
				continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.type != BTRFS_INODE_ITEM_KEY)
			goto next;

		if (key.objectid >= root->highest_objectid)
			break;

		if (last != (u64)-1 && last + 1 != key.objectid) {
			__btrfs_add_free_space(ctl, last + 1,
					       key.objectid - last - 1);
			wake_up(&root->cache_wait);
		}

		last = key.objectid;
next:
		path->slots[0]++;
	}

	if (last < root->highest_objectid - 1) {
		__btrfs_add_free_space(ctl, last + 1,
				       root->highest_objectid - last - 1);
	}

	spin_lock(&root->cache_lock);
	root->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&root->cache_lock);

	root->cache_progress = (u64)-1;
	btrfs_unpin_free_ino(root);
out:
	wake_up(&root->cache_wait);
	mutex_unlock(&root->fs_commit_mutex);

	btrfs_free_path(path);

	return ret;
}
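
/*
 * Start filling the free-ino cache for this root: first try to load a
 * previously saved cache from disk, and if none is available, kick off
 * caching_kthread() to rebuild it from the commit root.
 */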
static void start_caching(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct task_struct *tsk;
	int ret;
	u64 objectid;

	spin_lock(&root->cache_lock);
	if (root->cached != BTRFS_CACHE_NO) {
		spin_unlock(&root->cache_lock);
		return;
	}

	root->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&root->cache_lock);

	ret = load_free_ino_cache(root->fs_info, root);
	if (ret == 1) {
		spin_lock(&root->cache_lock);
		root->cached = BTRFS_CACHE_FINISHED;
		spin_unlock(&root->cache_lock);
		return;
	}

	/*
	 * It can be quite time-consuming to fill the cache by searching
	 * through the extent tree, and this can keep the ino allocation
	 * path waiting. Therefore at the start we quickly find out the
	 * highest inode number, and then we know we can use the inode
	 * numbers that fall in [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID].
	 */
	ret = btrfs_find_free_objectid(root, &objectid);
	if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
		__btrfs_add_free_space(ctl, objectid,
				       BTRFS_LAST_FREE_OBJECTID - objectid + 1);
	}

	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
			  root->root_key.objectid);
	BUG_ON(IS_ERR(tsk)); /* -ENOMEM */
}
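
/*
 * Pick a free inode number from the cache. If the cache has nothing
 * yet but caching has not finished, wait until either more free
 * numbers show up or the cache is complete; return -ENOSPC only when
 * the whole range is known to be exhausted.
 */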
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
{
again:
	*objectid = btrfs_find_ino_for_alloc(root);

	if (*objectid != 0)
		return 0;

	start_caching(root);

	wait_event(root->cache_wait,
		   root->cached == BTRFS_CACHE_FINISHED ||
		   root->free_ino_ctl->free_space > 0);

	if (root->cached == BTRFS_CACHE_FINISHED &&
	    root->free_ino_ctl->free_space == 0)
		return -ENOSPC;
	else
		goto again;
}
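
/*
 * Give an inode number back to the free-ino cache, e.g. when inode
 * creation fails. Numbers the caching thread has not passed yet are
 * parked in the pinned tree so they are not added twice.
 */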
void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
again:
	if (root->cached == BTRFS_CACHE_FINISHED) {
		__btrfs_add_free_space(ctl, objectid, 1);
	} else {
		/*
		 * If we are in the process of caching free ino chunks,
		 * to avoid adding the same inode number to the free_ino
		 * tree twice across transactions, we leave it in the
		 * pinned tree until a transaction is committed or the
		 * caching work is done.
		 */
		mutex_lock(&root->fs_commit_mutex);
		spin_lock(&root->cache_lock);
		if (root->cached == BTRFS_CACHE_FINISHED) {
			spin_unlock(&root->cache_lock);
			mutex_unlock(&root->fs_commit_mutex);
			goto again;
		}
		spin_unlock(&root->cache_lock);

		start_caching(root);

		if (objectid <= root->cache_progress ||
		    objectid > root->highest_objectid)
			__btrfs_add_free_space(ctl, objectid, 1);
		else
			__btrfs_add_free_space(pinned, objectid, 1);

		mutex_unlock(&root->fs_commit_mutex);
	}
}

/*
 * When a transaction is committed, we'll move the inode numbers that
 * are smaller than root->cache_progress from the pinned tree to the
 * free_ino tree, and the others will just be dropped, because the
 * commit root we were searching has changed.
 *
 * Must be called with root->fs_commit_mutex held.
 */
void btrfs_unpin_free_ino(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
	struct btrfs_free_space *info;
	struct rb_node *n;
	u64 count;

	while (1) {
		n = rb_first(rbroot);
		if (!n)
			break;

		info = rb_entry(n, struct btrfs_free_space, offset_index);
		BUG_ON(info->bitmap); /* the pinned tree never holds bitmaps */

		if (info->offset > root->cache_progress)
			goto free;
		else if (info->offset + info->bytes > root->cache_progress)
			count = root->cache_progress - info->offset + 1;
		else
			count = info->bytes;

		__btrfs_add_free_space(ctl, info->offset, count);
free:
		rb_erase(&info->offset_index, rbroot);
		kfree(info);
	}
}

#define INIT_THRESHOLD	(((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
#define INODES_PER_BITMAP	(PAGE_CACHE_SIZE * 8)

/*
 * The goal is to keep the memory used by the free_ino tree from
 * exceeding what we would use if we stored the free inode numbers
 * in bitmaps only.
 */
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	u64 max_ino;
	u64 max_bitmaps;

	n = rb_last(&ctl->free_space_offset);
	if (!n) {
		ctl->extents_thresh = INIT_THRESHOLD;
		return;
	}
	info = rb_entry(n, struct btrfs_free_space, offset_index);

	/*
	 * Find the maximum inode number in the filesystem. Note we
	 * ignore the fact that this can be a bitmap, because we are
	 * not doing a precise calculation.
	 */
	max_ino = info->bytes - 1;

	max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
	if (max_bitmaps <= ctl->total_bitmaps) {
		ctl->extents_thresh = 0;
		return;
	}

	ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
				PAGE_CACHE_SIZE / sizeof(*info);
}

/*
 * We don't fall back to bitmaps if we are below the extents threshold
 * or if this chunk of inode numbers is a big one.
 */
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	if (ctl->free_extents < ctl->extents_thresh ||
	    info->bytes > INODES_PER_BITMAP / 10)
		return false;

	return true;
}

static struct btrfs_free_space_op free_ino_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
{
}

static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	/*
	 * We always use extents for two reasons:
	 *
	 * - The pinned tree is only used during the process of caching
	 *   work.
	 * - It makes the code simpler. See btrfs_unpin_free_ino().
	 */
	return false;
}

static struct btrfs_free_space_op pinned_free_ino_op = {
	.recalc_thresholds	= pinned_recalc_thresholds,
	.use_bitmap		= pinned_use_bitmap,
};
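
/*
 * Set up the in-memory free-ino and pinned free-space controls for a
 * root. Both track individual inode numbers, so the unit is 1.
 */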
void btrfs_init_free_ino_ctl(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = 1;
	ctl->start = 0;
	ctl->private = NULL;
	ctl->op = &free_ino_op;

	/*
	 * Initially we allow 16K of ram to be used to cache chunks of
	 * inode numbers before we resort to bitmaps. This is somewhat
	 * arbitrary, but it will be adjusted at runtime.
	 */
	ctl->extents_thresh = INIT_THRESHOLD;

	spin_lock_init(&pinned->tree_lock);
	pinned->unit = 1;
	pinned->start = 0;
	pinned->private = NULL;
	pinned->extents_thresh = 0;
	pinned->op = &pinned_free_ino_op;
}
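
/*
 * Write the current state of the free-ino cache into its cache inode
 * at transaction commit time, so it can be read back later by
 * load_free_ino_cache() when caching is started again.
 */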
int btrfs_save_ino_cache(struct btrfs_root *root,
			 struct btrfs_trans_handle *trans)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	u64 alloc_hint = 0;
	int ret;
	int prealloc;
	bool retry = false;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retry);
		retry = true;

		ret = create_free_ino_inode(root, trans, path);
		if (ret)
			goto out;
		goto again;
	}

	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	WARN_ON(ret);

	if (i_size_read(inode) > 0) {
		ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&root->cache_lock);
	if (root->cached != BTRFS_CACHE_FINISHED) {
		ret = -1;
		spin_unlock(&root->cache_lock);
		goto out_put;
	}
	spin_unlock(&root->cache_lock);

	spin_lock(&ctl->tree_lock);
	prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
	prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
	prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
	spin_unlock(&ctl->tree_lock);

	/* Just to make sure we have enough space */
	prealloc += 8 * PAGE_CACHE_SIZE;

	ret = btrfs_check_data_free_space(inode, prealloc);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
					      prealloc, prealloc, &alloc_hint);
	if (ret)
		goto out_put;
	btrfs_free_reserved_data_space(inode, prealloc);

out_put:
	iput(inode);
out:
	if (ret == 0)
		ret = btrfs_write_out_ino_cache(root, trans, path);

	btrfs_free_path(path);
	return ret;
}
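
/*
 * Look up the highest inode objectid currently present in this root,
 * by searching backwards from BTRFS_LAST_FREE_OBJECTID.
 */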
static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *l;
	struct btrfs_key search_key;
	struct btrfs_key found_key;
	int slot;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
	search_key.type = -1;
	search_key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto error;
	BUG_ON(ret == 0); /* the search key can never exactly match an item */
	if (path->slots[0] > 0) {
		slot = path->slots[0] - 1;
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		*objectid = max_t(u64, found_key.objectid,
				  BTRFS_FIRST_FREE_OBJECTID - 1);
	} else {
		*objectid = BTRFS_FIRST_FREE_OBJECTID - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
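
/*
 * Hand out a brand-new objectid by bumping root->highest_objectid,
 * initializing it from the tree on first use.
 */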
int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	mutex_lock(&root->objectid_mutex);

	if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_find_highest_objectid(root,
						  &root->highest_objectid);
		if (ret)
			goto out;
	}

	if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
		ret = -ENOSPC;
		goto out;
	}

	*objectid = ++root->highest_objectid;
	ret = 0;
out:
	mutex_unlock(&root->objectid_mutex);
	return ret;
}