fs/btrfs/inode-map.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>

#include "ctree.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "transaction.h"

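/*
 * Background worker that builds the in-memory free-ino cache: it walks
 * the inode items in the commit root and records every gap between
 * used inode numbers into root->free_ino_ctl.
 */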
static int caching_kthread(void *data)
{
        struct btrfs_root *root = data;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct btrfs_key key;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        u64 last = (u64)-1;
        int slot;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* Since the commit root is read-only, we can safely skip locking. */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 2;

        key.objectid = BTRFS_FIRST_FREE_OBJECTID;
        key.offset = 0;
        key.type = BTRFS_INODE_ITEM_KEY;
again:
        /* need to make sure the commit_root doesn't disappear */
        mutex_lock(&root->fs_commit_mutex);

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;

        while (1) {
                smp_mb();
                if (fs_info->closing > 1)
                        goto out;

                leaf = path->nodes[0];
                slot = path->slots[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto out;
                        else if (ret > 0)
                                break;

                        if (need_resched() ||
                            btrfs_transaction_in_commit(fs_info)) {
                                leaf = path->nodes[0];

                                if (btrfs_header_nritems(leaf) == 0) {
                                        WARN_ON(1);
                                        break;
                                }

                                /*
                                 * Save the key so we can advance forward
                                 * in the next search.
                                 */
                                btrfs_item_key_to_cpu(leaf, &key, 0);
                                btrfs_release_path(path);
                                root->cache_progress = last;
                                mutex_unlock(&root->fs_commit_mutex);
                                schedule_timeout(1);
                                goto again;
                        } else {
                                continue;
                        }
                }

                btrfs_item_key_to_cpu(leaf, &key, slot);

                if (key.type != BTRFS_INODE_ITEM_KEY)
                        goto next;

                if (key.objectid >= BTRFS_LAST_FREE_OBJECTID)
                        break;

                /* Record the gap between two used inode numbers as free. */
                if (last != (u64)-1 && last + 1 != key.objectid) {
                        __btrfs_add_free_space(ctl, last + 1,
                                               key.objectid - last - 1);
                        wake_up(&root->cache_wait);
                }

                last = key.objectid;
next:
                path->slots[0]++;
        }

        /* Everything after the last inode item is free as well. */
        if (last < BTRFS_LAST_FREE_OBJECTID - 1) {
                __btrfs_add_free_space(ctl, last + 1,
                                       BTRFS_LAST_FREE_OBJECTID - last - 1);
        }

        spin_lock(&root->cache_lock);
        root->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&root->cache_lock);

        root->cache_progress = (u64)-1;
        btrfs_unpin_free_ino(root);
out:
        wake_up(&root->cache_wait);
        mutex_unlock(&root->fs_commit_mutex);

        btrfs_free_path(path);

        return ret;
}

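/*
 * Try to fill the free-ino cache: first from the free-space cache
 * inode saved on disk, and failing that by kicking off the caching
 * kthread to rebuild it from the commit root.
 */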
static void start_caching(struct btrfs_root *root)
{
        struct task_struct *tsk;
        int ret;

        spin_lock(&root->cache_lock);
        if (root->cached != BTRFS_CACHE_NO) {
                spin_unlock(&root->cache_lock);
                return;
        }

        root->cached = BTRFS_CACHE_STARTED;
        spin_unlock(&root->cache_lock);

        ret = load_free_ino_cache(root->fs_info, root);
        if (ret == 1) {
                spin_lock(&root->cache_lock);
                root->cached = BTRFS_CACHE_FINISHED;
                spin_unlock(&root->cache_lock);
                return;
        }

        tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
                          root->root_key.objectid);
        BUG_ON(IS_ERR(tsk));
}

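/*
 * Hand out a free inode number from the cache, waiting for the caching
 * work to make progress if the cache is still being built.  Returns 0
 * and stores the number in *objectid, or -ENOSPC once the whole inode
 * number space has been used up.
 *
 * A typical caller looks roughly like the following (a sketch only;
 * the real call sites live in the inode creation paths, not here):
 *
 *	err = btrfs_find_free_ino(root, &objectid);
 *	if (err)
 *		return err;
 *	inode = btrfs_new_inode(trans, root, ..., objectid, ...);
 */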
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
{
again:
        *objectid = btrfs_find_ino_for_alloc(root);

        if (*objectid != 0)
                return 0;

        start_caching(root);

        wait_event(root->cache_wait,
                   root->cached == BTRFS_CACHE_FINISHED ||
                   root->free_ino_ctl->free_space > 0);

        if (root->cached == BTRFS_CACHE_FINISHED &&
            root->free_ino_ctl->free_space == 0)
                return -ENOSPC;
        else
                goto again;
}

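/*
 * Return an inode number to the free-ino cache, e.g. when inode
 * creation fails or an inode is removed.  If the caching work is
 * still in progress the number may have to be pinned instead; see
 * the comment in the else branch below.
 */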
void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
again:
        if (root->cached == BTRFS_CACHE_FINISHED) {
                __btrfs_add_free_space(ctl, objectid, 1);
        } else {
                /*
                 * If we are in the process of caching free ino chunks,
                 * to avoid adding the same inode number to the free_ino
                 * tree twice across transactions, we'll leave it in the
                 * pinned tree until a transaction is committed or the
                 * caching work is done.
                 */

                mutex_lock(&root->fs_commit_mutex);
                spin_lock(&root->cache_lock);
                if (root->cached == BTRFS_CACHE_FINISHED) {
                        spin_unlock(&root->cache_lock);
                        mutex_unlock(&root->fs_commit_mutex);
                        goto again;
                }
                spin_unlock(&root->cache_lock);

                start_caching(root);

                if (objectid <= root->cache_progress)
                        __btrfs_add_free_space(ctl, objectid, 1);
                else
                        __btrfs_add_free_space(pinned, objectid, 1);

                mutex_unlock(&root->fs_commit_mutex);
        }
}

/*
 * When a transaction is committed, we'll move those inode numbers which
 * are smaller than root->cache_progress from the pinned tree to the
 * free_ino tree, and others will just be dropped, because the commit
 * root we were searching has changed.
 *
 * Must be called with root->fs_commit_mutex held.
 */
void btrfs_unpin_free_ino(struct btrfs_root *root)
{
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
        struct btrfs_free_space *info;
        struct rb_node *n;
        u64 count;

        while (1) {
                n = rb_first(rbroot);
                if (!n)
                        break;

                info = rb_entry(n, struct btrfs_free_space, offset_index);
                BUG_ON(info->bitmap);

                if (info->offset > root->cache_progress)
                        goto free;
                else if (info->offset + info->bytes > root->cache_progress)
                        count = root->cache_progress - info->offset + 1;
                else
                        count = info->bytes;

                __btrfs_add_free_space(ctl, info->offset, count);
free:
                rb_erase(&info->offset_index, rbroot);
                kfree(info);
        }
}

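/*
 * INIT_THRESHOLD is the number of extent entries that fit in 16K of
 * memory; INODES_PER_BITMAP is how many inode numbers one page-sized
 * bitmap can track (one bit per inode number).
 */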
#define INIT_THRESHOLD  (((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)

/*
 * The goal is to keep the memory used by the free_ino tree from
 * exceeding what we would use if we used bitmaps only.
 */
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
        struct btrfs_free_space *info;
        struct rb_node *n;
        int max_ino;
        int max_bitmaps;

        n = rb_last(&ctl->free_space_offset);
        if (!n) {
                ctl->extents_thresh = INIT_THRESHOLD;
                return;
        }
        info = rb_entry(n, struct btrfs_free_space, offset_index);

        /*
         * Find the maximum inode number in the filesystem. Note we
         * ignore the fact that this can be a bitmap, because we are
         * not doing a precise calculation.
         */
        max_ino = info->bytes - 1;

        max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
        if (max_bitmaps <= ctl->total_bitmaps) {
                ctl->extents_thresh = 0;
                return;
        }

        /*
         * Allow as many extent entries as would fit in the pages not
         * yet consumed by bitmaps.
         */
        ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
                                PAGE_CACHE_SIZE / sizeof(*info);
}

/*
 * We don't fall back to a bitmap if we are below the extents threshold
 * or this chunk of inode numbers is a big one.
 */
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
                       struct btrfs_free_space *info)
{
        if (ctl->free_extents < ctl->extents_thresh ||
            info->bytes > INODES_PER_BITMAP / 10)
                return false;

        return true;
}

static struct btrfs_free_space_op free_ino_op = {
        .recalc_thresholds      = recalculate_thresholds,
        .use_bitmap             = use_bitmap,
};

static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
{
}

static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info)
{
        /*
         * We always use extents for two reasons:
         *
         * - The pinned tree is only used while the caching work is
         *   in progress.
         * - It keeps the code simpler. See btrfs_unpin_free_ino().
         */
        return false;
}

static struct btrfs_free_space_op pinned_free_ino_op = {
        .recalc_thresholds      = pinned_recalc_thresholds,
        .use_bitmap             = pinned_use_bitmap,
};

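/*
 * Set up the two free-space controls of a root: free_ino_ctl for
 * cached free inode numbers and free_ino_pinned for numbers that must
 * wait until the next transaction commit.  Both work in units of one
 * inode number.
 */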
void btrfs_init_free_ino_ctl(struct btrfs_root *root)
{
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

        spin_lock_init(&ctl->tree_lock);
        ctl->unit = 1;
        ctl->start = 0;
        ctl->private = NULL;
        ctl->op = &free_ino_op;

        /*
         * Initially we allow 16K of RAM to cache chunks of inode
         * numbers before we resort to bitmaps. This is somewhat
         * arbitrary, but it will be adjusted at runtime.
         */
        ctl->extents_thresh = INIT_THRESHOLD;

        spin_lock_init(&pinned->tree_lock);
        pinned->unit = 1;
        pinned->start = 0;
        pinned->private = NULL;
        pinned->extents_thresh = 0;
        pinned->op = &pinned_free_ino_op;
}

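/*
 * Write the in-memory free-ino cache to its hidden free-space-cache
 * inode so it can be loaded again after a remount.  Called with a
 * transaction handle during commit; enough file space is preallocated
 * up front for all the extent entries and bitmaps currently cached.
 */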
int btrfs_save_ino_cache(struct btrfs_root *root,
                         struct btrfs_trans_handle *trans)
{
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct btrfs_path *path;
        struct inode *inode;
        u64 alloc_hint = 0;
        int ret;
        int prealloc;
        bool retry = false;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        inode = lookup_free_ino_inode(root, path);
        if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
                ret = PTR_ERR(inode);
                goto out;
        }

        if (IS_ERR(inode)) {
                BUG_ON(retry);
                retry = true;

                ret = create_free_ino_inode(root, trans, path);
                if (ret)
                        goto out;
                goto again;
        }

        BTRFS_I(inode)->generation = 0;
        ret = btrfs_update_inode(trans, root, inode);
        WARN_ON(ret);

        if (i_size_read(inode) > 0) {
                ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
                if (ret)
                        goto out_put;
        }

        spin_lock(&root->cache_lock);
        if (root->cached != BTRFS_CACHE_FINISHED) {
                ret = -1;
                spin_unlock(&root->cache_lock);
                goto out_put;
        }
        spin_unlock(&root->cache_lock);

        spin_lock(&ctl->tree_lock);
        prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
        prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
        prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
        spin_unlock(&ctl->tree_lock);

        /* Just to make sure we have enough space */
        prealloc += 8 * PAGE_CACHE_SIZE;

        ret = btrfs_check_data_free_space(inode, prealloc);
        if (ret)
                goto out_put;

        ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
                                              prealloc, prealloc, &alloc_hint);
        if (ret)
                goto out_put;
        btrfs_free_reserved_data_space(inode, prealloc);

out_put:
        iput(inode);
out:
        if (ret == 0)
                ret = btrfs_write_out_ino_cache(root, trans, path);

        btrfs_free_path(path);
        return ret;
}

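/*
 * Find the highest objectid currently in use in this root by seeking
 * to the last key in the tree.  If the tree holds no items past the
 * reserved objectids yet, report BTRFS_FIRST_FREE_OBJECTID - 1 so the
 * next allocation starts at BTRFS_FIRST_FREE_OBJECTID.
 */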
static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
{
        struct btrfs_path *path;
        int ret;
        struct extent_buffer *l;
        struct btrfs_key search_key;
        struct btrfs_key found_key;
        int slot;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* Position the path just past the largest possible inode key. */
        search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
        search_key.type = -1;
        search_key.offset = (u64)-1;
        ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
        if (ret < 0)
                goto error;
        BUG_ON(ret == 0);
        if (path->slots[0] > 0) {
                slot = path->slots[0] - 1;
                l = path->nodes[0];
                btrfs_item_key_to_cpu(l, &found_key, slot);
                *objectid = max_t(u64, found_key.objectid,
                                  BTRFS_FIRST_FREE_OBJECTID - 1);
        } else {
                *objectid = BTRFS_FIRST_FREE_OBJECTID - 1;
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}

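/*
 * Allocate the next objectid in this root by bumping the cached
 * highest objectid, initializing it from the tree on first use.
 * Unlike btrfs_find_free_ino(), freed inode numbers are never
 * reused here; allocation is strictly sequential.
 */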
int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
{
        int ret;

        mutex_lock(&root->objectid_mutex);

        if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
                ret = btrfs_find_highest_objectid(root,
                                                  &root->highest_objectid);
                if (ret)
                        goto out;
        }

        if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
                ret = -ENOSPC;
                goto out;
        }

        *objectid = ++root->highest_objectid;
        ret = 0;
out:
        mutex_unlock(&root->objectid_mutex);
        return ret;
}