/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
static struct kmem_cache *btrfs_ordered_extent_cache;
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}
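/*
 * For example: an entry with file_offset == (u64)-4096 and len == 8192 would
 * wrap around, so entry_end() clamps the result to (u64)-1 and the extent is
 * treated as reaching the end of the offset space.
 */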
/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno,
		    "Inconsistency in ordered tree at offset %llu", offset);
}
/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}
/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}
/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
	    !(type == BTRFS_ORDERED_NOCOW))
		entry->csum_bytes_left = disk_len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root,
			      &root->fs_info->ordered_roots);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}
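/*
 * Sketch of the typical calling pattern, modeled loosely on cow_file_range()
 * in inode.c (the locals below are illustrative, not from this file):
 *
 *	ret = btrfs_reserve_extent(root, disk_num_bytes, ..., &ins, ...);
 *	if (ret)
 *		goto out;
 *	ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
 *				       ram_size, ins.offset, 0);
 *
 * Passing 0 for type records a plain COW write; the ordered extent then
 * tracks the reserved disk space until the data IO completes and
 * btrfs_finish_ordered_io() inserts the file extent item.
 */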
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	WARN_ON(entry->csum_bytes_left < sum->len);
	entry->csum_bytes_left -= sum->len;
	if (entry->csum_bytes_left == 0)
		wake_up(&entry->wait);
	spin_unlock_irq(&tree->lock);
}
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size,
		      entry->file_offset + entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			"bad ordering dec_start %llu end %llu", dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			"bad ordered accounting left %llu size %llu",
			entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
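/*
 * Sketch of how a write-completion path can walk a range that spans several
 * ordered extents, modeled on the direct IO write endio code in inode.c
 * (locals are illustrative):
 *
 *	u64 offset = start;		// first byte of the finished IO
 *	u64 end = start + bytes;	// one past the last byte
 *	struct btrfs_ordered_extent *ordered = NULL;
 *
 *	while (offset < end) {
 *		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
 *							 &offset, end - offset,
 *							 uptodate)) {
 *			// this call completed 'ordered'; queue the work that
 *			// runs btrfs_finish_ordered_io() on it
 *		}
 *		ordered = NULL;
 *	}
 *
 * Each iteration advances offset past the range just accounted, so every
 * ordered extent under the IO is visited exactly once.
 */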
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			"bad ordered accounting left %llu size %llu",
			entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct inode *inode,
			      struct list_head *logged_list)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		if (!list_empty(&ordered->log_list))
			continue;
		list_add_tail(&ordered->log_list, logged_list);
		atomic_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}
void btrfs_put_logged_extents(struct list_head *logged_list)
{
	struct btrfs_ordered_extent *ordered;

	while (!list_empty(logged_list)) {
		ordered = list_first_entry(logged_list,
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		btrfs_put_ordered_extent(ordered);
	}
}
void btrfs_submit_logged_extents(struct list_head *logged_list,
				 struct btrfs_root *log)
{
	int index = log->log_transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	list_splice_tail(logged_list, &log->logged_list[index]);
	spin_unlock_irq(&log->log_extents_lock[index]);
}
void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);

		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			struct inode *inode = ordered->inode;
			u64 start = ordered->file_offset;
			u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(!inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));

		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}
/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}
/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
{
	struct list_head splice, works;
	struct btrfs_ordered_extent *ordered, *next;
	int count = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);
		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_flush_delalloc_helper,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(root->fs_info->flush_workers,
				 &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != -1)
			nr--;
		count++;
	}
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_root *root;
	struct list_head splice;
	int done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != -1) {
			nr -= done;
			WARN_ON(nr < 0);
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}
/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
	if (ret)
		return ret;

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags)) {
		ret = filemap_fdatawrite_range(inode->i_mapping, start,
					       orig_end);
		if (ret)
			return ret;
	}

	ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
	if (ret)
		return ret;

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret;
}
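/*
 * Typical use, as in the fsync path: make sure everything between start and
 * end is on disk before the log is written (sketch, error handling trimmed):
 *
 *	ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
 *	if (ret)
 *		return ret;
 *
 * On return, every ordered extent overlapping [start, start + len) has
 * completed, or ret is -EIO if any of them hit an IO error.
 */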
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}
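/*
 * The reference taken above must be dropped with btrfs_put_ordered_extent().
 * A common pattern in the page invalidation paths (sketch):
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
 *	if (ordered) {
 *		// wait for, or help finish, the pending ordered IO here
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */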
/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}
/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}
/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * undealt i_size. Or we will not know the real
			 * i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}
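/*
 * Worked example of the walk above, assuming 4K sectors: writes at [0,4K)
 * and [8K,12K) complete while [4K,8K) is still in flight.  When [8K,12K)
 * finishes, the backward walk hits the pending [4K,8K) extent, so
 * disk_i_size stays at 4K and 12K is stashed in that extent's
 * outstanding_isize.  When [4K,8K) later completes, disk_i_size can move
 * straight to 12K.
 */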
/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors * sizeof(u32));

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}