1 #include <linux/bitops.h>
2 #include <linux/slab.h>
5 #include <linux/pagemap.h>
6 #include <linux/page-flags.h>
7 #include <linux/module.h>
8 #include <linux/spinlock.h>
9 #include <linux/blkdev.h>
10 #include <linux/swap.h>
11 #include <linux/writeback.h>
12 #include <linux/pagevec.h>
13 #include <linux/prefetch.h>
14 #include <linux/cleancache.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
19 #include "btrfs_inode.h"
21 #include "check-integrity.h"
23 #include "rcu-string.h"
25 static struct kmem_cache *extent_state_cache;
26 static struct kmem_cache *extent_buffer_cache;
28 static LIST_HEAD(buffers);
29 static LIST_HEAD(states);
33 static DEFINE_SPINLOCK(leak_lock);
36 #define BUFFER_LRU_MAX 64
41 struct rb_node rb_node;
44 struct extent_page_data {
46 struct extent_io_tree *tree;
47 get_extent_t *get_extent;
49 /* tells writepage not to lock the state bits for this range
50 * it still does the unlocking
52 unsigned int extent_locked:1;
54 /* tells the submit_bio code to use a WRITE_SYNC */
55 unsigned int sync_io:1;
58 static noinline void flush_write_bio(void *data);
59 static inline struct btrfs_fs_info *
60 tree_fs_info(struct extent_io_tree *tree)
62 return btrfs_sb(tree->mapping->host->i_sb);
65 int __init extent_io_init(void)
67 extent_state_cache = kmem_cache_create("extent_state",
68 sizeof(struct extent_state), 0,
69 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
70 if (!extent_state_cache)
73 extent_buffer_cache = kmem_cache_create("extent_buffers",
74 sizeof(struct extent_buffer), 0,
75 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
76 if (!extent_buffer_cache)
77 goto free_state_cache;
81 kmem_cache_destroy(extent_state_cache);
85 void extent_io_exit(void)
87 struct extent_state *state;
88 struct extent_buffer *eb;
90 while (!list_empty(&states)) {
91 state = list_entry(states.next, struct extent_state, leak_list);
92 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
93 "state %lu in tree %p refs %d\n",
94 (unsigned long long)state->start,
95 (unsigned long long)state->end,
96 state->state, state->tree, atomic_read(&state->refs));
97 list_del(&state->leak_list);
98 kmem_cache_free(extent_state_cache, state);
102 while (!list_empty(&buffers)) {
103 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
104 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
105 "refs %d\n", (unsigned long long)eb->start,
106 eb->len, atomic_read(&eb->refs));
107 list_del(&eb->leak_list);
108 kmem_cache_free(extent_buffer_cache, eb);
112 * Make sure all delayed RCU frees are flushed before we
116 if (extent_state_cache)
117 kmem_cache_destroy(extent_state_cache);
118 if (extent_buffer_cache)
119 kmem_cache_destroy(extent_buffer_cache);
122 void extent_io_tree_init(struct extent_io_tree *tree,
123 struct address_space *mapping)
125 tree->state = RB_ROOT;
126 INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
128 tree->dirty_bytes = 0;
129 spin_lock_init(&tree->lock);
130 spin_lock_init(&tree->buffer_lock);
131 tree->mapping = mapping;
134 static struct extent_state *alloc_extent_state(gfp_t mask)
136 struct extent_state *state;
141 state = kmem_cache_alloc(extent_state_cache, mask);
148 spin_lock_irqsave(&leak_lock, flags);
149 list_add(&state->leak_list, &states);
150 spin_unlock_irqrestore(&leak_lock, flags);
152 atomic_set(&state->refs, 1);
153 init_waitqueue_head(&state->wq);
154 trace_alloc_extent_state(state, mask, _RET_IP_);
158 void free_extent_state(struct extent_state *state)
162 if (atomic_dec_and_test(&state->refs)) {
166 WARN_ON(state->tree);
168 spin_lock_irqsave(&leak_lock, flags);
169 list_del(&state->leak_list);
170 spin_unlock_irqrestore(&leak_lock, flags);
172 trace_free_extent_state(state, _RET_IP_);
173 kmem_cache_free(extent_state_cache, state);
177 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
178 struct rb_node *node)
180 struct rb_node **p = &root->rb_node;
181 struct rb_node *parent = NULL;
182 struct tree_entry *entry;
186 entry = rb_entry(parent, struct tree_entry, rb_node);
188 if (offset < entry->start)
190 else if (offset > entry->end)
196 rb_link_node(node, parent, p);
197 rb_insert_color(node, root);
201 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
202 struct rb_node **prev_ret,
203 struct rb_node **next_ret)
205 struct rb_root *root = &tree->state;
206 struct rb_node *n = root->rb_node;
207 struct rb_node *prev = NULL;
208 struct rb_node *orig_prev = NULL;
209 struct tree_entry *entry;
210 struct tree_entry *prev_entry = NULL;
213 entry = rb_entry(n, struct tree_entry, rb_node);
217 if (offset < entry->start)
219 else if (offset > entry->end)
227 while (prev && offset > prev_entry->end) {
228 prev = rb_next(prev);
229 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
236 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
237 while (prev && offset < prev_entry->start) {
238 prev = rb_prev(prev);
239 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
246 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
249 struct rb_node *prev = NULL;
252 ret = __etree_search(tree, offset, &prev, NULL);
258 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
259 struct extent_state *other)
261 if (tree->ops && tree->ops->merge_extent_hook)
262 tree->ops->merge_extent_hook(tree->mapping->host, new,
267 * utility function to look for merge candidates inside a given range.
268 * Any extents with matching state are merged together into a single
269 * extent in the tree. Extents with EXTENT_IOBITS in their state field
270 * are not merged because the end_io handlers need to be able to do
271 * operations on them without sleeping (or doing allocations/splits).
273 * This should be called with the tree lock held.
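/*
 * Illustrative sketch, not part of the original file: because EXTENT_DIRTY is
 * neither an IO bit nor a boundary bit, setting it on two abutting ranges lets
 * merge_state() below collapse them into a single extent_state; a "filled"
 * test_range_bit() over the union then succeeds (assuming nothing else was set
 * on those ranges).  The helper name is hypothetical.
 */
static inline int example_merge_adjacent(struct extent_io_tree *tree)
{
	/* [0, 4095] and [4096, 8191] end up with identical state bits */
	set_extent_bits(tree, 0, 4095, EXTENT_DIRTY, GFP_NOFS);
	set_extent_bits(tree, 4096, 8191, EXTENT_DIRTY, GFP_NOFS);

	/* so the whole [0, 8191] range now tests as dirty */
	return test_range_bit(tree, 0, 8191, EXTENT_DIRTY, 1, NULL);
}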
275 static void merge_state(struct extent_io_tree *tree,
276 struct extent_state *state)
278 struct extent_state *other;
279 struct rb_node *other_node;
281 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
284 other_node = rb_prev(&state->rb_node);
286 other = rb_entry(other_node, struct extent_state, rb_node);
287 if (other->end == state->start - 1 &&
288 other->state == state->state) {
289 merge_cb(tree, state, other);
290 state->start = other->start;
292 rb_erase(&other->rb_node, &tree->state);
293 free_extent_state(other);
296 other_node = rb_next(&state->rb_node);
298 other = rb_entry(other_node, struct extent_state, rb_node);
299 if (other->start == state->end + 1 &&
300 other->state == state->state) {
301 merge_cb(tree, state, other);
302 state->end = other->end;
304 rb_erase(&other->rb_node, &tree->state);
305 free_extent_state(other);
310 static void set_state_cb(struct extent_io_tree *tree,
311 struct extent_state *state, int *bits)
313 if (tree->ops && tree->ops->set_bit_hook)
314 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
317 static void clear_state_cb(struct extent_io_tree *tree,
318 struct extent_state *state, int *bits)
320 if (tree->ops && tree->ops->clear_bit_hook)
321 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
324 static void set_state_bits(struct extent_io_tree *tree,
325 struct extent_state *state, int *bits);
328 * insert an extent_state struct into the tree. 'bits' are set on the
329 * struct before it is inserted.
331 * This may return -EEXIST if the extent is already there, in which case the
332 * state struct is freed.
334 * The tree lock is not taken internally. This is a utility function and
335 * probably isn't what you want to call (see set/clear_extent_bit).
337 static int insert_state(struct extent_io_tree *tree,
338 struct extent_state *state, u64 start, u64 end,
341 struct rb_node *node;
344 printk(KERN_ERR "btrfs end < start %llu %llu\n",
345 (unsigned long long)end,
346 (unsigned long long)start);
349 state->start = start;
352 set_state_bits(tree, state, bits);
354 node = tree_insert(&tree->state, end, &state->rb_node);
356 struct extent_state *found;
357 found = rb_entry(node, struct extent_state, rb_node);
358 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
359 "%llu %llu\n", (unsigned long long)found->start,
360 (unsigned long long)found->end,
361 (unsigned long long)start, (unsigned long long)end);
365 merge_state(tree, state);
369 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
372 if (tree->ops && tree->ops->split_extent_hook)
373 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
377 * split a given extent state struct in two, inserting the preallocated
378 * struct 'prealloc' as the newly created second half. 'split' indicates an
379 * offset inside 'orig' where it should be split.
382 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
383 * are two extent state structs in the tree:
384 * prealloc: [orig->start, split - 1]
385 * orig: [ split, orig->end ]
387 * The tree locks are not taken by this function. They need to be held by the caller.
390 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
391 struct extent_state *prealloc, u64 split)
393 struct rb_node *node;
395 split_cb(tree, orig, split);
397 prealloc->start = orig->start;
398 prealloc->end = split - 1;
399 prealloc->state = orig->state;
402 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
404 free_extent_state(prealloc);
407 prealloc->tree = tree;
411 static struct extent_state *next_state(struct extent_state *state)
413 struct rb_node *next = rb_next(&state->rb_node);
415 return rb_entry(next, struct extent_state, rb_node);
421 * utility function to clear some bits in an extent state struct.
422 * it will optionally wake up anyone waiting on this state (wake == 1).
424 * If no bits are set on the state struct after clearing things, the
425 * struct is freed and removed from the tree
427 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
428 struct extent_state *state,
431 struct extent_state *next;
432 int bits_to_clear = *bits & ~EXTENT_CTLBITS;
434 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
435 u64 range = state->end - state->start + 1;
436 WARN_ON(range > tree->dirty_bytes);
437 tree->dirty_bytes -= range;
439 clear_state_cb(tree, state, bits);
440 state->state &= ~bits_to_clear;
443 if (state->state == 0) {
444 next = next_state(state);
446 rb_erase(&state->rb_node, &tree->state);
448 free_extent_state(state);
453 merge_state(tree, state);
454 next = next_state(state);
459 static struct extent_state *
460 alloc_extent_state_atomic(struct extent_state *prealloc)
463 prealloc = alloc_extent_state(GFP_ATOMIC);
468 void extent_io_tree_panic(struct extent_io_tree *tree, int err)
470 btrfs_panic(tree_fs_info(tree), err, "Locking error: "
471 "Extent tree was modified by another "
472 "thread while locked.");
476 * clear some bits on a range in the tree. This may require splitting
477 * or inserting elements in the tree, so the gfp mask is used to
478 * indicate which allocations or sleeping are allowed.
480 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
481 * the given range from the tree regardless of state (ie for truncate).
483 * the range [start, end] is inclusive.
485 * This takes the tree lock, and returns 0 on success and < 0 on error.
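/*
 * Illustrative sketch, not part of the original file: a truncate-style caller
 * dropping every record from 'isize' to the end of the file, waking any
 * sleepers and deleting the states regardless of which bits they carry
 * (wake == 1, delete == 1).  The helper name is hypothetical.
 */
static inline int example_drop_range(struct extent_io_tree *tree, u64 isize)
{
	return clear_extent_bit(tree, isize, (u64)-1,
				EXTENT_DIRTY | EXTENT_DELALLOC,
				1, 1, NULL, GFP_NOFS);
}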
487 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
488 int bits, int wake, int delete,
489 struct extent_state **cached_state,
492 struct extent_state *state;
493 struct extent_state *cached;
494 struct extent_state *prealloc = NULL;
495 struct rb_node *node;
501 bits |= ~EXTENT_CTLBITS;
502 bits |= EXTENT_FIRST_DELALLOC;
504 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
507 if (!prealloc && (mask & __GFP_WAIT)) {
508 prealloc = alloc_extent_state(mask);
513 spin_lock(&tree->lock);
515 cached = *cached_state;
518 *cached_state = NULL;
522 if (cached && cached->tree && cached->start <= start &&
523 cached->end > start) {
525 atomic_dec(&cached->refs);
530 free_extent_state(cached);
533 * this search will find the extents that end after
536 node = tree_search(tree, start);
539 state = rb_entry(node, struct extent_state, rb_node);
541 if (state->start > end)
543 WARN_ON(state->end < start);
544 last_end = state->end;
546 /* the state doesn't have the wanted bits, go ahead */
547 if (!(state->state & bits)) {
548 state = next_state(state);
553 * | ---- desired range ---- |
555 * | ------------- state -------------- |
557 * We need to split the extent we found, and may flip
558 * bits on second half.
560 * If the extent we found extends past our range, we
561 * just split and search again. It'll get split again
562 * the next time though.
564 * If the extent we found is inside our range, we clear
565 * the desired bit on it.
568 if (state->start < start) {
569 prealloc = alloc_extent_state_atomic(prealloc);
571 err = split_state(tree, state, prealloc, start);
573 extent_io_tree_panic(tree, err);
578 if (state->end <= end) {
579 state = clear_state_bit(tree, state, &bits, wake);
585 * | ---- desired range ---- |
587 * We need to split the extent, and clear the bit
590 if (state->start <= end && state->end > end) {
591 prealloc = alloc_extent_state_atomic(prealloc);
593 err = split_state(tree, state, prealloc, end + 1);
595 extent_io_tree_panic(tree, err);
600 clear_state_bit(tree, prealloc, &bits, wake);
606 state = clear_state_bit(tree, state, &bits, wake);
608 if (last_end == (u64)-1)
610 start = last_end + 1;
611 if (start <= end && state && !need_resched())
616 spin_unlock(&tree->lock);
618 free_extent_state(prealloc);
625 spin_unlock(&tree->lock);
626 if (mask & __GFP_WAIT)
631 static void wait_on_state(struct extent_io_tree *tree,
632 struct extent_state *state)
633 __releases(tree->lock)
634 __acquires(tree->lock)
637 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
638 spin_unlock(&tree->lock);
640 spin_lock(&tree->lock);
641 finish_wait(&state->wq, &wait);
645 * waits for one or more bits to clear on a range in the state tree.
646 * The range [start, end] is inclusive.
647 * The tree lock is taken by this function
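/*
 * Illustrative sketch, not part of the original file: block until every
 * EXTENT_LOCKED range inside [start, end] has been unlocked.  The helper
 * name is hypothetical.
 */
static inline void example_wait_unlocked(struct extent_io_tree *tree,
					 u64 start, u64 end)
{
	wait_extent_bit(tree, start, end, EXTENT_LOCKED);
}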
649 void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
651 struct extent_state *state;
652 struct rb_node *node;
654 spin_lock(&tree->lock);
658 * this search will find all the extents that end after
661 node = tree_search(tree, start);
665 state = rb_entry(node, struct extent_state, rb_node);
667 if (state->start > end)
670 if (state->state & bits) {
671 start = state->start;
672 atomic_inc(&state->refs);
673 wait_on_state(tree, state);
674 free_extent_state(state);
677 start = state->end + 1;
682 cond_resched_lock(&tree->lock);
685 spin_unlock(&tree->lock);
688 static void set_state_bits(struct extent_io_tree *tree,
689 struct extent_state *state,
692 int bits_to_set = *bits & ~EXTENT_CTLBITS;
694 set_state_cb(tree, state, bits);
695 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
696 u64 range = state->end - state->start + 1;
697 tree->dirty_bytes += range;
699 state->state |= bits_to_set;
702 static void cache_state(struct extent_state *state,
703 struct extent_state **cached_ptr)
705 if (cached_ptr && !(*cached_ptr)) {
706 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
708 atomic_inc(&state->refs);
713 static void uncache_state(struct extent_state **cached_ptr)
715 if (cached_ptr && (*cached_ptr)) {
716 struct extent_state *state = *cached_ptr;
718 free_extent_state(state);
723 * set some bits on a range in the tree. This may require allocations or
724 * sleeping, so the gfp mask is used to indicate what is allowed.
726 * If any of the exclusive bits are set, this will fail with -EEXIST if some
727 * part of the range already has the desired bits set. The start of the
728 * existing range is returned in failed_start in this case.
730 * [start, end] is inclusive. This takes the tree lock.
733 static int __must_check
734 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
735 int bits, int exclusive_bits, u64 *failed_start,
736 struct extent_state **cached_state, gfp_t mask)
738 struct extent_state *state;
739 struct extent_state *prealloc = NULL;
740 struct rb_node *node;
745 bits |= EXTENT_FIRST_DELALLOC;
747 if (!prealloc && (mask & __GFP_WAIT)) {
748 prealloc = alloc_extent_state(mask);
752 spin_lock(&tree->lock);
753 if (cached_state && *cached_state) {
754 state = *cached_state;
755 if (state->start <= start && state->end > start &&
757 node = &state->rb_node;
762 * this search will find all the extents that end after
765 node = tree_search(tree, start);
767 prealloc = alloc_extent_state_atomic(prealloc);
769 err = insert_state(tree, prealloc, start, end, &bits);
771 extent_io_tree_panic(tree, err);
776 state = rb_entry(node, struct extent_state, rb_node);
778 last_start = state->start;
779 last_end = state->end;
782 * | ---- desired range ---- |
785 * Just lock what we found and keep going
787 if (state->start == start && state->end <= end) {
788 if (state->state & exclusive_bits) {
789 *failed_start = state->start;
794 set_state_bits(tree, state, &bits);
795 cache_state(state, cached_state);
796 merge_state(tree, state);
797 if (last_end == (u64)-1)
799 start = last_end + 1;
800 state = next_state(state);
801 if (start < end && state && state->start == start &&
808 * | ---- desired range ---- |
811 * | ------------- state -------------- |
813 * We need to split the extent we found, and may flip bits on
816 * If the extent we found extends past our
817 * range, we just split and search again. It'll get split
818 * again the next time though.
820 * If the extent we found is inside our range, we set the
823 if (state->start < start) {
824 if (state->state & exclusive_bits) {
825 *failed_start = start;
830 prealloc = alloc_extent_state_atomic(prealloc);
832 err = split_state(tree, state, prealloc, start);
834 extent_io_tree_panic(tree, err);
839 if (state->end <= end) {
840 set_state_bits(tree, state, &bits);
841 cache_state(state, cached_state);
842 merge_state(tree, state);
843 if (last_end == (u64)-1)
845 start = last_end + 1;
846 state = next_state(state);
847 if (start < end && state && state->start == start &&
854 * | ---- desired range ---- |
855 * | state | or | state |
857 * There's a hole, we need to insert something in it and
858 * ignore the extent we found.
860 if (state->start > start) {
862 if (end < last_start)
865 this_end = last_start - 1;
867 prealloc = alloc_extent_state_atomic(prealloc);
871 * Avoid freeing 'prealloc' if it can be merged with
874 err = insert_state(tree, prealloc, start, this_end,
877 extent_io_tree_panic(tree, err);
879 cache_state(prealloc, cached_state);
881 start = this_end + 1;
885 * | ---- desired range ---- |
887 * We need to split the extent, and set the bit
890 if (state->start <= end && state->end > end) {
891 if (state->state & exclusive_bits) {
892 *failed_start = start;
897 prealloc = alloc_extent_state_atomic(prealloc);
899 err = split_state(tree, state, prealloc, end + 1);
901 extent_io_tree_panic(tree, err);
903 set_state_bits(tree, prealloc, &bits);
904 cache_state(prealloc, cached_state);
905 merge_state(tree, prealloc);
913 spin_unlock(&tree->lock);
915 free_extent_state(prealloc);
922 spin_unlock(&tree->lock);
923 if (mask & __GFP_WAIT)
928 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
929 u64 *failed_start, struct extent_state **cached_state,
932 return __set_extent_bit(tree, start, end, bits, 0, failed_start,
938 * convert_extent_bit - convert all bits in a given range from one bit to another
940 * @tree: the io tree to search
941 * @start: the start offset in bytes
942 * @end: the end offset in bytes (inclusive)
943 * @bits: the bits to set in this range
944 * @clear_bits: the bits to clear in this range
945 * @mask: the allocation mask
947 * This will go through and set bits for the given range. If any states exist
948 * already in this range they are set with the given bit and cleared of the
949 * clear_bits. This is only meant to be used by things that are mergeable, ie
950 * converting from say DELALLOC to DIRTY. This is not meant to be used with
951 * boundary bits like LOCK.
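/*
 * Illustrative sketch, not part of the original file: the conversion described
 * above, flipping a delalloc range over to plain dirty in a single pass.  The
 * helper name is hypothetical.
 */
static inline int example_convert_delalloc(struct extent_io_tree *tree,
					   u64 start, u64 end)
{
	return convert_extent_bit(tree, start, end, EXTENT_DIRTY,
				  EXTENT_DELALLOC, GFP_NOFS);
}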
953 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
954 int bits, int clear_bits, gfp_t mask)
956 struct extent_state *state;
957 struct extent_state *prealloc = NULL;
958 struct rb_node *node;
964 if (!prealloc && (mask & __GFP_WAIT)) {
965 prealloc = alloc_extent_state(mask);
970 spin_lock(&tree->lock);
972 * this search will find all the extents that end after
975 node = tree_search(tree, start);
977 prealloc = alloc_extent_state_atomic(prealloc);
982 err = insert_state(tree, prealloc, start, end, &bits);
985 extent_io_tree_panic(tree, err);
988 state = rb_entry(node, struct extent_state, rb_node);
990 last_start = state->start;
991 last_end = state->end;
994 * | ---- desired range ---- |
997 * Just lock what we found and keep going
999 if (state->start == start && state->end <= end) {
1000 set_state_bits(tree, state, &bits);
1001 state = clear_state_bit(tree, state, &clear_bits, 0);
1002 if (last_end == (u64)-1)
1004 start = last_end + 1;
1005 if (start < end && state && state->start == start &&
1012 * | ---- desired range ---- |
1015 * | ------------- state -------------- |
1017 * We need to split the extent we found, and may flip bits on
1020 * If the extent we found extends past our
1021 * range, we just split and search again. It'll get split
1022 * again the next time though.
1024 * If the extent we found is inside our range, we set the
1025 * desired bit on it.
1027 if (state->start < start) {
1028 prealloc = alloc_extent_state_atomic(prealloc);
1033 err = split_state(tree, state, prealloc, start);
1035 extent_io_tree_panic(tree, err);
1039 if (state->end <= end) {
1040 set_state_bits(tree, state, &bits);
1041 state = clear_state_bit(tree, state, &clear_bits, 0);
1042 if (last_end == (u64)-1)
1044 start = last_end + 1;
1045 if (start < end && state && state->start == start &&
1052 * | ---- desired range ---- |
1053 * | state | or | state |
1055 * There's a hole, we need to insert something in it and
1056 * ignore the extent we found.
1058 if (state->start > start) {
1060 if (end < last_start)
1063 this_end = last_start - 1;
1065 prealloc = alloc_extent_state_atomic(prealloc);
1072 * Avoid freeing 'prealloc' if it can be merged with
1075 err = insert_state(tree, prealloc, start, this_end,
1078 extent_io_tree_panic(tree, err);
1080 start = this_end + 1;
1084 * | ---- desired range ---- |
1086 * We need to split the extent, and set the bit
1089 if (state->start <= end && state->end > end) {
1090 prealloc = alloc_extent_state_atomic(prealloc);
1096 err = split_state(tree, state, prealloc, end + 1);
1098 extent_io_tree_panic(tree, err);
1100 set_state_bits(tree, prealloc, &bits);
1101 clear_state_bit(tree, prealloc, &clear_bits, 0);
1109 spin_unlock(&tree->lock);
1111 free_extent_state(prealloc);
1118 spin_unlock(&tree->lock);
1119 if (mask & __GFP_WAIT)
1124 /* wrappers around set/clear extent bit */
1125 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1128 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
1132 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1133 int bits, gfp_t mask)
1135 return set_extent_bit(tree, start, end, bits, NULL,
1139 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1140 int bits, gfp_t mask)
1142 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
1145 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1146 struct extent_state **cached_state, gfp_t mask)
1148 return set_extent_bit(tree, start, end,
1149 EXTENT_DELALLOC | EXTENT_UPTODATE,
1150 NULL, cached_state, mask);
1153 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1156 return clear_extent_bit(tree, start, end,
1157 EXTENT_DIRTY | EXTENT_DELALLOC |
1158 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
1161 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1164 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
1168 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1169 struct extent_state **cached_state, gfp_t mask)
1171 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
1172 cached_state, mask);
1175 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1176 struct extent_state **cached_state, gfp_t mask)
1178 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1179 cached_state, mask);
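/*
 * Illustrative sketch, not part of the original file: typical use of the
 * wrappers above -- tag a range as delalloc while it waits for writeback,
 * then strip the dirty/delalloc/accounting bits once it has been written
 * out.  The helper names are hypothetical.
 */
static inline int example_mark_delalloc(struct extent_io_tree *tree,
					u64 start, u64 end,
					struct extent_state **cached)
{
	return set_extent_delalloc(tree, start, end, cached, GFP_NOFS);
}

static inline int example_writeback_done(struct extent_io_tree *tree,
					 u64 start, u64 end)
{
	return clear_extent_dirty(tree, start, end, GFP_NOFS);
}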
1183 * either insert or lock the state struct between start and end; use mask to tell
1184 * us if waiting is desired.
1186 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1187 int bits, struct extent_state **cached_state)
1192 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1193 EXTENT_LOCKED, &failed_start,
1194 cached_state, GFP_NOFS);
1195 if (err == -EEXIST) {
1196 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1197 start = failed_start;
1200 WARN_ON(start > end);
1205 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1207 return lock_extent_bits(tree, start, end, 0, NULL);
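/*
 * Illustrative sketch, not part of the original file: the usual pattern around
 * reading or writing [start, end] -- lock the range in the io tree, do the
 * work, unlock it again.  The helper name is hypothetical.
 */
static inline void example_locked_io(struct extent_io_tree *tree,
				     u64 start, u64 end)
{
	lock_extent(tree, start, end);
	/* ... read or modify the range while it cannot be locked again ... */
	unlock_extent(tree, start, end);
}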
1210 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1215 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1216 &failed_start, NULL, GFP_NOFS);
1217 if (err == -EEXIST) {
1218 if (failed_start > start)
1219 clear_extent_bit(tree, start, failed_start - 1,
1220 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1226 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1227 struct extent_state **cached, gfp_t mask)
1229 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1233 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1235 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1240 * helper function to set both pages and extents in the tree writeback
1242 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1244 unsigned long index = start >> PAGE_CACHE_SHIFT;
1245 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1248 while (index <= end_index) {
1249 page = find_get_page(tree->mapping, index);
1250 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1251 set_page_writeback(page);
1252 page_cache_release(page);
1258 /* find the first state struct with 'bits' set after 'start', and
1259 * return it. tree->lock must be held. NULL will be returned if
1260 * nothing was found after 'start'
1262 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1263 u64 start, int bits)
1265 struct rb_node *node;
1266 struct extent_state *state;
1269 * this search will find all the extents that end after
1272 node = tree_search(tree, start);
1277 state = rb_entry(node, struct extent_state, rb_node);
1278 if (state->end >= start && (state->state & bits))
1281 node = rb_next(node);
1290 * find the first offset in the io tree with 'bits' set. zero is
1291 * returned if we find something, and *start_ret and *end_ret are
1292 * set to reflect the state struct that was found.
1294 * If nothing was found, 1 is returned; if something was found, 0 is returned.
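/*
 * Illustrative sketch, not part of the original file: walk every dirty range
 * at or beyond 'start' until the search comes back empty, summing their
 * lengths.  The helper name is hypothetical.
 */
static inline u64 example_sum_dirty(struct extent_io_tree *tree, u64 start)
{
	u64 found_start;
	u64 found_end;
	u64 total = 0;

	while (!find_first_extent_bit(tree, start, &found_start,
				      &found_end, EXTENT_DIRTY)) {
		total += found_end - found_start + 1;
		if (found_end == (u64)-1)
			break;
		start = found_end + 1;
	}
	return total;
}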
1296 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1297 u64 *start_ret, u64 *end_ret, int bits)
1299 struct extent_state *state;
1302 spin_lock(&tree->lock);
1303 state = find_first_extent_bit_state(tree, start, bits);
1305 *start_ret = state->start;
1306 *end_ret = state->end;
1309 spin_unlock(&tree->lock);
1314 * find a contiguous range of bytes in the file marked as delalloc, not
1315 * more than 'max_bytes'. start and end are used to return the range,
1317 * 1 is returned if we find something, 0 if nothing was in the tree
1319 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1320 u64 *start, u64 *end, u64 max_bytes,
1321 struct extent_state **cached_state)
1323 struct rb_node *node;
1324 struct extent_state *state;
1325 u64 cur_start = *start;
1327 u64 total_bytes = 0;
1329 spin_lock(&tree->lock);
1332 * this search will find all the extents that end after
1335 node = tree_search(tree, cur_start);
1343 state = rb_entry(node, struct extent_state, rb_node);
1344 if (found && (state->start != cur_start ||
1345 (state->state & EXTENT_BOUNDARY))) {
1348 if (!(state->state & EXTENT_DELALLOC)) {
1354 *start = state->start;
1355 *cached_state = state;
1356 atomic_inc(&state->refs);
1360 cur_start = state->end + 1;
1361 node = rb_next(node);
1364 total_bytes += state->end - state->start + 1;
1365 if (total_bytes >= max_bytes)
1369 spin_unlock(&tree->lock);
1373 static noinline void __unlock_for_delalloc(struct inode *inode,
1374 struct page *locked_page,
1378 struct page *pages[16];
1379 unsigned long index = start >> PAGE_CACHE_SHIFT;
1380 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1381 unsigned long nr_pages = end_index - index + 1;
1384 if (index == locked_page->index && end_index == index)
1387 while (nr_pages > 0) {
1388 ret = find_get_pages_contig(inode->i_mapping, index,
1389 min_t(unsigned long, nr_pages,
1390 ARRAY_SIZE(pages)), pages);
1391 for (i = 0; i < ret; i++) {
1392 if (pages[i] != locked_page)
1393 unlock_page(pages[i]);
1394 page_cache_release(pages[i]);
1402 static noinline int lock_delalloc_pages(struct inode *inode,
1403 struct page *locked_page,
1407 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1408 unsigned long start_index = index;
1409 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1410 unsigned long pages_locked = 0;
1411 struct page *pages[16];
1412 unsigned long nrpages;
1416 /* the caller is responsible for locking the start index */
1417 if (index == locked_page->index && index == end_index)
1420 /* skip the page at the start index */
1421 nrpages = end_index - index + 1;
1422 while (nrpages > 0) {
1423 ret = find_get_pages_contig(inode->i_mapping, index,
1424 min_t(unsigned long,
1425 nrpages, ARRAY_SIZE(pages)), pages);
1430 /* now we have an array of pages, lock them all */
1431 for (i = 0; i < ret; i++) {
1433 * the caller is taking responsibility for
1436 if (pages[i] != locked_page) {
1437 lock_page(pages[i]);
1438 if (!PageDirty(pages[i]) ||
1439 pages[i]->mapping != inode->i_mapping) {
1441 unlock_page(pages[i]);
1442 page_cache_release(pages[i]);
1446 page_cache_release(pages[i]);
1455 if (ret && pages_locked) {
1456 __unlock_for_delalloc(inode, locked_page,
1458 ((u64)(start_index + pages_locked - 1)) <<
1465 * find a contiguous range of bytes in the file marked as delalloc, not
1466 * more than 'max_bytes'. start and end are used to return the range,
1468 * 1 is returned if we find something, 0 if nothing was in the tree
1470 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1471 struct extent_io_tree *tree,
1472 struct page *locked_page,
1473 u64 *start, u64 *end,
1479 struct extent_state *cached_state = NULL;
1484 /* step one, find a bunch of delalloc bytes starting at start */
1485 delalloc_start = *start;
1487 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1488 max_bytes, &cached_state);
1489 if (!found || delalloc_end <= *start) {
1490 *start = delalloc_start;
1491 *end = delalloc_end;
1492 free_extent_state(cached_state);
1497 * start comes from the offset of locked_page. We have to lock
1498 * pages in order, so we can't process delalloc bytes before
1501 if (delalloc_start < *start)
1502 delalloc_start = *start;
1505 * make sure to limit the number of pages we try to lock down
1508 if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1509 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1511 /* step two, lock all the pages after the page that has start */
1512 ret = lock_delalloc_pages(inode, locked_page,
1513 delalloc_start, delalloc_end);
1514 if (ret == -EAGAIN) {
1515 /* some of the pages are gone, let's avoid looping by
1516 * shortening the size of the delalloc range we're searching
1518 free_extent_state(cached_state);
1520 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1521 max_bytes = PAGE_CACHE_SIZE - offset;
1529 BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1531 /* step three, lock the state bits for the whole range */
1532 lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
1534 /* then test to make sure it is all still delalloc */
1535 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1536 EXTENT_DELALLOC, 1, cached_state);
1538 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1539 &cached_state, GFP_NOFS);
1540 __unlock_for_delalloc(inode, locked_page,
1541 delalloc_start, delalloc_end);
1545 free_extent_state(cached_state);
1546 *start = delalloc_start;
1547 *end = delalloc_end;
1552 int extent_clear_unlock_delalloc(struct inode *inode,
1553 struct extent_io_tree *tree,
1554 u64 start, u64 end, struct page *locked_page,
1558 struct page *pages[16];
1559 unsigned long index = start >> PAGE_CACHE_SHIFT;
1560 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1561 unsigned long nr_pages = end_index - index + 1;
1565 if (op & EXTENT_CLEAR_UNLOCK)
1566 clear_bits |= EXTENT_LOCKED;
1567 if (op & EXTENT_CLEAR_DIRTY)
1568 clear_bits |= EXTENT_DIRTY;
1570 if (op & EXTENT_CLEAR_DELALLOC)
1571 clear_bits |= EXTENT_DELALLOC;
1573 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1574 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1575 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1576 EXTENT_SET_PRIVATE2)))
1579 while (nr_pages > 0) {
1580 ret = find_get_pages_contig(inode->i_mapping, index,
1581 min_t(unsigned long,
1582 nr_pages, ARRAY_SIZE(pages)), pages);
1583 for (i = 0; i < ret; i++) {
1585 if (op & EXTENT_SET_PRIVATE2)
1586 SetPagePrivate2(pages[i]);
1588 if (pages[i] == locked_page) {
1589 page_cache_release(pages[i]);
1592 if (op & EXTENT_CLEAR_DIRTY)
1593 clear_page_dirty_for_io(pages[i]);
1594 if (op & EXTENT_SET_WRITEBACK)
1595 set_page_writeback(pages[i]);
1596 if (op & EXTENT_END_WRITEBACK)
1597 end_page_writeback(pages[i]);
1598 if (op & EXTENT_CLEAR_UNLOCK_PAGE)
1599 unlock_page(pages[i]);
1600 page_cache_release(pages[i]);
1610 * count the number of bytes in the tree that have a given bit(s)
1611 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1612 * cached. The total number found is returned.
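/*
 * Illustrative sketch, not part of the original file: ask how many bytes in
 * the whole tree are dirty.  With *start == 0 and bits == EXTENT_DIRTY this
 * takes the cached tree->dirty_bytes shortcut mentioned above.  The helper
 * name is hypothetical.
 */
static inline u64 example_dirty_byte_count(struct extent_io_tree *tree)
{
	u64 start = 0;

	return count_range_bits(tree, &start, (u64)-1, (u64)-1,
				EXTENT_DIRTY, 0);
}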
1614 u64 count_range_bits(struct extent_io_tree *tree,
1615 u64 *start, u64 search_end, u64 max_bytes,
1616 unsigned long bits, int contig)
1618 struct rb_node *node;
1619 struct extent_state *state;
1620 u64 cur_start = *start;
1621 u64 total_bytes = 0;
1625 if (search_end <= cur_start) {
1630 spin_lock(&tree->lock);
1631 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1632 total_bytes = tree->dirty_bytes;
1636 * this search will find all the extents that end after
1639 node = tree_search(tree, cur_start);
1644 state = rb_entry(node, struct extent_state, rb_node);
1645 if (state->start > search_end)
1647 if (contig && found && state->start > last + 1)
1649 if (state->end >= cur_start && (state->state & bits) == bits) {
1650 total_bytes += min(search_end, state->end) + 1 -
1651 max(cur_start, state->start);
1652 if (total_bytes >= max_bytes)
1655 *start = max(cur_start, state->start);
1659 } else if (contig && found) {
1662 node = rb_next(node);
1667 spin_unlock(&tree->lock);
1672 * set the private field for a given byte offset in the tree. If there isn't
1673 * an extent_state there already, this does nothing.
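/*
 * Illustrative sketch, not part of the original file: stash a value at a byte
 * offset and read it back, the same pattern the failure-record code later in
 * this file uses to remember an io_failure_record pointer.  Both calls fail
 * with -ENOENT unless an extent_state starts exactly at 'start'.  The helper
 * name is hypothetical.
 */
static inline int example_private_roundtrip(struct extent_io_tree *tree,
					    u64 start, u64 value)
{
	u64 out = 0;
	int ret;

	ret = set_state_private(tree, start, value);
	if (ret)
		return ret;
	ret = get_state_private(tree, start, &out);
	if (!ret)
		WARN_ON(out != value);
	return ret;
}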
1675 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1677 struct rb_node *node;
1678 struct extent_state *state;
1681 spin_lock(&tree->lock);
1683 * this search will find all the extents that end after
1686 node = tree_search(tree, start);
1691 state = rb_entry(node, struct extent_state, rb_node);
1692 if (state->start != start) {
1696 state->private = private;
1698 spin_unlock(&tree->lock);
1702 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1704 struct rb_node *node;
1705 struct extent_state *state;
1708 spin_lock(&tree->lock);
1710 * this search will find all the extents that end after
1713 node = tree_search(tree, start);
1718 state = rb_entry(node, struct extent_state, rb_node);
1719 if (state->start != start) {
1723 *private = state->private;
1725 spin_unlock(&tree->lock);
1730 * searches a range in the state tree for a given mask.
1731 * If 'filled' == 1, this returns 1 only if every extent in the range
1732 * has the bits set. Otherwise, 1 is returned if any bit in the
1733 * range is found set.
1735 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1736 int bits, int filled, struct extent_state *cached)
1738 struct extent_state *state = NULL;
1739 struct rb_node *node;
1742 spin_lock(&tree->lock);
1743 if (cached && cached->tree && cached->start <= start &&
1744 cached->end > start)
1745 node = &cached->rb_node;
1747 node = tree_search(tree, start);
1748 while (node && start <= end) {
1749 state = rb_entry(node, struct extent_state, rb_node);
1751 if (filled && state->start > start) {
1756 if (state->start > end)
1759 if (state->state & bits) {
1763 } else if (filled) {
1768 if (state->end == (u64)-1)
1771 start = state->end + 1;
1774 node = rb_next(node);
1781 spin_unlock(&tree->lock);
1786 * helper function to set a given page up to date if all the
1787 * extents in the tree for that page are up to date
1789 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1791 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1792 u64 end = start + PAGE_CACHE_SIZE - 1;
1793 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1794 SetPageUptodate(page);
1798 * helper function to unlock a page if all the extents in the tree
1799 * for that page are unlocked
1801 static void check_page_locked(struct extent_io_tree *tree, struct page *page)
1803 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1804 u64 end = start + PAGE_CACHE_SIZE - 1;
1805 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1810 * helper function to end page writeback if all the extents
1811 * in the tree for that page are done with writeback
1813 static void check_page_writeback(struct extent_io_tree *tree,
1816 end_page_writeback(page);
1820 * When IO fails, either with EIO or csum verification fails, we
1821 * try other mirrors that might have a good copy of the data. This
1822 * io_failure_record is used to record state as we go through all the
1823 * mirrors. If another mirror has good data, the page is set up to date
1824 * and things continue. If a good mirror can't be found, the original
1825 * bio end_io callback is called to indicate things have failed.
1827 struct io_failure_record {
1832 unsigned long bio_flags;
1838 static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1843 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1845 set_state_private(failure_tree, rec->start, 0);
1846 ret = clear_extent_bits(failure_tree, rec->start,
1847 rec->start + rec->len - 1,
1848 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1853 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1854 rec->start + rec->len - 1,
1855 EXTENT_DAMAGED, GFP_NOFS);
1864 static void repair_io_failure_callback(struct bio *bio, int err)
1866 complete(bio->bi_private);
1870 * this bypasses the standard btrfs submit functions deliberately, as
1871 * the standard behavior is to write all copies in a raid setup. here we only
1872 * want to write the one bad copy. so we do the mapping for ourselves and issue
1873 * submit_bio directly.
1874 * to avoid any synchronization issues, wait for the data after writing, which
1875 * actually prevents the read that triggered the error from finishing.
1876 * currently, there can be no more than two copies of every data bit. thus,
1877 * exactly one rewrite is required.
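/*
 * Illustrative sketch, not part of the original file: rewrite one bad copy of
 * a single data page, the same way repair_eb_io_failure() below walks a
 * metadata buffer.  mirror_num is 1-based and must not be zero.  The helper
 * name is hypothetical.
 */
static inline int example_repair_data_page(struct btrfs_mapping_tree *map_tree,
					   u64 start, u64 logical,
					   struct page *page, int mirror_num)
{
	return repair_io_failure(map_tree, start, PAGE_CACHE_SIZE,
				 logical, page, mirror_num);
}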
1879 int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
1880 u64 length, u64 logical, struct page *page,
1884 struct btrfs_device *dev;
1885 DECLARE_COMPLETION_ONSTACK(compl);
1888 struct btrfs_bio *bbio = NULL;
1891 BUG_ON(!mirror_num);
1893 bio = bio_alloc(GFP_NOFS, 1);
1896 bio->bi_private = &compl;
1897 bio->bi_end_io = repair_io_failure_callback;
1899 map_length = length;
1901 ret = btrfs_map_block(map_tree, WRITE, logical,
1902 &map_length, &bbio, mirror_num);
1907 BUG_ON(mirror_num != bbio->mirror_num);
1908 sector = bbio->stripes[mirror_num-1].physical >> 9;
1909 bio->bi_sector = sector;
1910 dev = bbio->stripes[mirror_num-1].dev;
1912 if (!dev || !dev->bdev || !dev->writeable) {
1916 bio->bi_bdev = dev->bdev;
1917 bio_add_page(bio, page, length, start-page_offset(page));
1918 btrfsic_submit_bio(WRITE_SYNC, bio);
1919 wait_for_completion(&compl);
1921 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1922 /* try to remap that extent elsewhere? */
1924 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
1928 printk_ratelimited_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
1929 "(dev %s sector %llu)\n", page->mapping->host->i_ino,
1930 start, rcu_str_deref(dev->name), sector);
1936 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
1939 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
1940 u64 start = eb->start;
1941 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
1944 for (i = 0; i < num_pages; i++) {
1945 struct page *p = extent_buffer_page(eb, i);
1946 ret = repair_io_failure(map_tree, start, PAGE_CACHE_SIZE,
1947 start, p, mirror_num);
1950 start += PAGE_CACHE_SIZE;
1957 * each time an IO finishes, we do a fast check in the IO failure tree
1958 * to see if we need to process or clean up an io_failure_record
1960 static int clean_io_failure(u64 start, struct page *page)
1963 u64 private_failure;
1964 struct io_failure_record *failrec;
1965 struct btrfs_mapping_tree *map_tree;
1966 struct extent_state *state;
1970 struct inode *inode = page->mapping->host;
1973 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1974 (u64)-1, 1, EXTENT_DIRTY, 0);
1978 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
1983 failrec = (struct io_failure_record *)(unsigned long) private_failure;
1984 BUG_ON(!failrec->this_mirror);
1986 if (failrec->in_validation) {
1987 /* there was no real error, just free the record */
1988 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
1994 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1995 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1998 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2000 if (state && state->start == failrec->start) {
2001 map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
2002 num_copies = btrfs_num_copies(map_tree, failrec->logical,
2004 if (num_copies > 1) {
2005 ret = repair_io_failure(map_tree, start, failrec->len,
2006 failrec->logical, page,
2007 failrec->failed_mirror);
2014 ret = free_io_failure(inode, failrec, did_repair);
2020 * this is a generic handler for readpage errors (default
2021 * readpage_io_failed_hook). if other copies exist, read those and write back
2022 * good data to the failed position. does not try to remap the
2023 * failed extent elsewhere, hoping the device will be smart enough to do this as needed.
2027 static int bio_readpage_error(struct bio *failed_bio, struct page *page,
2028 u64 start, u64 end, int failed_mirror,
2029 struct extent_state *state)
2031 struct io_failure_record *failrec = NULL;
2033 struct extent_map *em;
2034 struct inode *inode = page->mapping->host;
2035 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2036 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2037 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2044 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2046 ret = get_state_private(failure_tree, start, &private);
2048 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2051 failrec->start = start;
2052 failrec->len = end - start + 1;
2053 failrec->this_mirror = 0;
2054 failrec->bio_flags = 0;
2055 failrec->in_validation = 0;
2057 read_lock(&em_tree->lock);
2058 em = lookup_extent_mapping(em_tree, start, failrec->len);
2060 read_unlock(&em_tree->lock);
2065 if (em->start > start || em->start + em->len < start) {
2066 free_extent_map(em);
2069 read_unlock(&em_tree->lock);
2071 if (!em || IS_ERR(em)) {
2075 logical = start - em->start;
2076 logical = em->block_start + logical;
2077 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2078 logical = em->block_start;
2079 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2080 extent_set_compress_type(&failrec->bio_flags,
2083 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2084 "len=%llu\n", logical, start, failrec->len);
2085 failrec->logical = logical;
2086 free_extent_map(em);
2088 /* set the bits in the private failure tree */
2089 ret = set_extent_bits(failure_tree, start, end,
2090 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2092 ret = set_state_private(failure_tree, start,
2093 (u64)(unsigned long)failrec);
2094 /* set the bits in the inode's tree */
2096 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2103 failrec = (struct io_failure_record *)(unsigned long)private;
2104 pr_debug("bio_readpage_error: (found) logical=%llu, "
2105 "start=%llu, len=%llu, validation=%d\n",
2106 failrec->logical, failrec->start, failrec->len,
2107 failrec->in_validation);
2109 * when data can be on disk more than twice, add to failrec here
2110 * (e.g. with a list for failed_mirror) to make
2111 * clean_io_failure() clean all those errors at once.
2114 num_copies = btrfs_num_copies(
2115 &BTRFS_I(inode)->root->fs_info->mapping_tree,
2116 failrec->logical, failrec->len);
2117 if (num_copies == 1) {
2119 * we only have a single copy of the data, so don't bother with
2120 * all the retry and error correction code that follows. no
2121 * matter what the error is, it is very likely to persist.
2123 pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
2124 "state=%p, num_copies=%d, next_mirror %d, "
2125 "failed_mirror %d\n", state, num_copies,
2126 failrec->this_mirror, failed_mirror);
2127 free_io_failure(inode, failrec, 0);
2132 spin_lock(&tree->lock);
2133 state = find_first_extent_bit_state(tree, failrec->start,
2135 if (state && state->start != failrec->start)
2137 spin_unlock(&tree->lock);
2141 * there are two premises:
2142 * a) deliver good data to the caller
2143 * b) correct the bad sectors on disk
2145 if (failed_bio->bi_vcnt > 1) {
2147 * to fulfill b), we need to know the exact failing sectors, as
2148 * we don't want to rewrite any more than the failed ones. thus,
2149 * we need separate read requests for the failed bio
2151 * if the following BUG_ON triggers, our validation request got
2152 * merged. we need separate requests for our algorithm to work.
2154 BUG_ON(failrec->in_validation);
2155 failrec->in_validation = 1;
2156 failrec->this_mirror = failed_mirror;
2157 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2160 * we're ready to fulfill a) and b) at the same time. get a good copy
2161 * of the failed sector and, if we succeed, we have set up
2162 * everything for repair_io_failure to do the rest for us.
2164 if (failrec->in_validation) {
2165 BUG_ON(failrec->this_mirror != failed_mirror);
2166 failrec->in_validation = 0;
2167 failrec->this_mirror = 0;
2169 failrec->failed_mirror = failed_mirror;
2170 failrec->this_mirror++;
2171 if (failrec->this_mirror == failed_mirror)
2172 failrec->this_mirror++;
2173 read_mode = READ_SYNC;
2176 if (!state || failrec->this_mirror > num_copies) {
2177 pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
2178 "next_mirror %d, failed_mirror %d\n", state,
2179 num_copies, failrec->this_mirror, failed_mirror);
2180 free_io_failure(inode, failrec, 0);
2184 bio = bio_alloc(GFP_NOFS, 1);
2186 free_io_failure(inode, failrec, 0);
2189 bio->bi_private = state;
2190 bio->bi_end_io = failed_bio->bi_end_io;
2191 bio->bi_sector = failrec->logical >> 9;
2192 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2195 bio_add_page(bio, page, failrec->len, start - page_offset(page));
2197 pr_debug("bio_readpage_error: submitting new read[%#x] to "
2198 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2199 failrec->this_mirror, num_copies, failrec->in_validation);
2201 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2202 failrec->this_mirror,
2203 failrec->bio_flags, 0);
2207 /* lots and lots of room for performance fixes in the end_bio funcs */
2209 int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2211 int uptodate = (err == 0);
2212 struct extent_io_tree *tree;
2215 tree = &BTRFS_I(page->mapping->host)->io_tree;
2217 if (tree->ops && tree->ops->writepage_end_io_hook) {
2218 ret = tree->ops->writepage_end_io_hook(page, start,
2219 end, NULL, uptodate);
2225 ClearPageUptodate(page);
2232 * after a writepage IO is done, we need to:
2233 * clear the uptodate bits on error
2234 * clear the writeback bits in the extent tree for this IO
2235 * end_page_writeback if the page has no more pending IO
2237 * Scheduling is not allowed, so the extent state tree is expected
2238 * to have one and only one object corresponding to this IO.
2240 static void end_bio_extent_writepage(struct bio *bio, int err)
2242 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2243 struct extent_io_tree *tree;
2249 struct page *page = bvec->bv_page;
2250 tree = &BTRFS_I(page->mapping->host)->io_tree;
2252 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2254 end = start + bvec->bv_len - 1;
2256 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2261 if (--bvec >= bio->bi_io_vec)
2262 prefetchw(&bvec->bv_page->flags);
2264 if (end_extent_writepage(page, err, start, end))
2268 end_page_writeback(page);
2270 check_page_writeback(tree, page);
2271 } while (bvec >= bio->bi_io_vec);
2277 * after a readpage IO is done, we need to:
2278 * clear the uptodate bits on error
2279 * set the uptodate bits if things worked
2280 * set the page up to date if all extents in the tree are uptodate
2281 * clear the lock bit in the extent tree
2282 * unlock the page if there are no other extents locked for it
2284 * Scheduling is not allowed, so the extent state tree is expected
2285 * to have one and only one object corresponding to this IO.
2287 static void end_bio_extent_readpage(struct bio *bio, int err)
2289 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2290 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2291 struct bio_vec *bvec = bio->bi_io_vec;
2292 struct extent_io_tree *tree;
2303 struct page *page = bvec->bv_page;
2304 struct extent_state *cached = NULL;
2305 struct extent_state *state;
2307 pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
2308 "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
2309 (long int)bio->bi_bdev);
2310 tree = &BTRFS_I(page->mapping->host)->io_tree;
2312 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2314 end = start + bvec->bv_len - 1;
2316 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2321 if (++bvec <= bvec_end)
2322 prefetchw(&bvec->bv_page->flags);
2324 spin_lock(&tree->lock);
2325 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
2326 if (state && state->start == start) {
2328 * take a reference on the state, unlock will drop
2331 cache_state(state, &cached);
2333 spin_unlock(&tree->lock);
2335 mirror = (int)(unsigned long)bio->bi_bdev;
2336 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
2337 ret = tree->ops->readpage_end_io_hook(page, start, end,
2340 /* no IO indicated but software detected errors
2341 * in the block, either checksum errors or
2342 * issues with the contents */
2343 struct btrfs_root *root =
2344 BTRFS_I(page->mapping->host)->root;
2345 struct btrfs_device *device;
2348 device = btrfs_find_device_for_logical(
2349 root, start, mirror);
2351 btrfs_dev_stat_inc_and_print(device,
2352 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2354 clean_io_failure(start, page);
2358 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
2359 ret = tree->ops->readpage_io_failed_hook(page, mirror);
2361 test_bit(BIO_UPTODATE, &bio->bi_flags))
2363 } else if (!uptodate) {
2365 * The generic bio_readpage_error handles errors the
2366 * following way: If possible, new read requests are
2367 * created and submitted and will end up in
2368 * end_bio_extent_readpage as well (if we're lucky, not
2369 * in the !uptodate case). In that case it returns 0 and
2370 * we just go on with the next page in our bio. If it
2371 * can't handle the error it will return -EIO and we
2372 * remain responsible for that page.
2374 ret = bio_readpage_error(bio, page, start, end, mirror, NULL);
2377 test_bit(BIO_UPTODATE, &bio->bi_flags);
2380 uncache_state(&cached);
2385 if (uptodate && tree->track_uptodate) {
2386 set_extent_uptodate(tree, start, end, &cached,
2389 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2393 SetPageUptodate(page);
2395 ClearPageUptodate(page);
2401 check_page_uptodate(tree, page);
2403 ClearPageUptodate(page);
2406 check_page_locked(tree, page);
2408 } while (bvec <= bvec_end);
2414 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2419 bio = bio_alloc(gfp_flags, nr_vecs);
2421 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2422 while (!bio && (nr_vecs /= 2))
2423 bio = bio_alloc(gfp_flags, nr_vecs);
2428 bio->bi_bdev = bdev;
2429 bio->bi_sector = first_sector;
2435 * Since writes are async, they will only return -ENOMEM.
2436 * Reads can return the full range of I/O error conditions.
2438 static int __must_check submit_one_bio(int rw, struct bio *bio,
2439 int mirror_num, unsigned long bio_flags)
2442 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2443 struct page *page = bvec->bv_page;
2444 struct extent_io_tree *tree = bio->bi_private;
2447 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
2449 bio->bi_private = NULL;
2453 if (tree->ops && tree->ops->submit_bio_hook)
2454 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2455 mirror_num, bio_flags, start);
2457 btrfsic_submit_bio(rw, bio);
2459 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2465 static int merge_bio(struct extent_io_tree *tree, struct page *page,
2466 unsigned long offset, size_t size, struct bio *bio,
2467 unsigned long bio_flags)
2470 if (tree->ops && tree->ops->merge_bio_hook)
2471 ret = tree->ops->merge_bio_hook(page, offset, size, bio,
2478 static int submit_extent_page(int rw, struct extent_io_tree *tree,
2479 struct page *page, sector_t sector,
2480 size_t size, unsigned long offset,
2481 struct block_device *bdev,
2482 struct bio **bio_ret,
2483 unsigned long max_pages,
2484 bio_end_io_t end_io_func,
2486 unsigned long prev_bio_flags,
2487 unsigned long bio_flags)
2493 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2494 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2495 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2497 if (bio_ret && *bio_ret) {
2500 contig = bio->bi_sector == sector;
2502 contig = bio->bi_sector + (bio->bi_size >> 9) ==
2505 if (prev_bio_flags != bio_flags || !contig ||
2506 merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
2507 bio_add_page(bio, page, page_size, offset) < page_size) {
2508 ret = submit_one_bio(rw, bio, mirror_num,
2517 if (this_compressed)
2520 nr = bio_get_nr_vecs(bdev);
2522 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2526 bio_add_page(bio, page, page_size, offset);
2527 bio->bi_end_io = end_io_func;
2528 bio->bi_private = tree;
2533 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2538 void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
2540 if (!PagePrivate(page)) {
2541 SetPagePrivate(page);
2542 page_cache_get(page);
2543 set_page_private(page, (unsigned long)eb);
2545 WARN_ON(page->private != (unsigned long)eb);
2549 void set_page_extent_mapped(struct page *page)
2551 if (!PagePrivate(page)) {
2552 SetPagePrivate(page);
2553 page_cache_get(page);
2554 set_page_private(page, EXTENT_PAGE_PRIVATE);
2559 * basic readpage implementation. Locked extent state structs are inserted
2560 * into the tree and are removed when the IO is done (by the end_io
2562 * XXX JDM: This needs looking at to ensure proper page locking
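/*
 * Illustrative sketch, not part of the original file: reading one page through
 * the public wrapper further below, passing btrfs_get_extent() as the mapping
 * callback -- essentially what the btrfs_readpage() address_space operation in
 * inode.c does.  The helper name is hypothetical.
 */
static inline int example_readpage(struct page *page)
{
	struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;

	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
}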
2564 static int __extent_read_full_page(struct extent_io_tree *tree,
2566 get_extent_t *get_extent,
2567 struct bio **bio, int mirror_num,
2568 unsigned long *bio_flags)
2570 struct inode *inode = page->mapping->host;
2571 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2572 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2576 u64 last_byte = i_size_read(inode);
2580 struct extent_map *em;
2581 struct block_device *bdev;
2582 struct btrfs_ordered_extent *ordered;
2585 size_t pg_offset = 0;
2587 size_t disk_io_size;
2588 size_t blocksize = inode->i_sb->s_blocksize;
2589 unsigned long this_bio_flag = 0;
2591 set_page_extent_mapped(page);
2593 if (!PageUptodate(page)) {
2594 if (cleancache_get_page(page) == 0) {
2595 BUG_ON(blocksize != PAGE_SIZE);
2602 lock_extent(tree, start, end);
2603 ordered = btrfs_lookup_ordered_extent(inode, start);
2606 unlock_extent(tree, start, end);
2607 btrfs_start_ordered_extent(inode, ordered, 1);
2608 btrfs_put_ordered_extent(ordered);
2611 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2613 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2616 iosize = PAGE_CACHE_SIZE - zero_offset;
2617 userpage = kmap_atomic(page);
2618 memset(userpage + zero_offset, 0, iosize);
2619 flush_dcache_page(page);
2620 kunmap_atomic(userpage);
2623 while (cur <= end) {
2624 if (cur >= last_byte) {
2626 struct extent_state *cached = NULL;
2628 iosize = PAGE_CACHE_SIZE - pg_offset;
2629 userpage = kmap_atomic(page);
2630 memset(userpage + pg_offset, 0, iosize);
2631 flush_dcache_page(page);
2632 kunmap_atomic(userpage);
2633 set_extent_uptodate(tree, cur, cur + iosize - 1,
2635 unlock_extent_cached(tree, cur, cur + iosize - 1,
2639 em = get_extent(inode, page, pg_offset, cur,
2641 if (IS_ERR_OR_NULL(em)) {
2643 unlock_extent(tree, cur, end);
2646 extent_offset = cur - em->start;
2647 BUG_ON(extent_map_end(em) <= cur);
2650 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2651 this_bio_flag = EXTENT_BIO_COMPRESSED;
2652 extent_set_compress_type(&this_bio_flag,
2656 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2657 cur_end = min(extent_map_end(em) - 1, end);
2658 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2659 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2660 disk_io_size = em->block_len;
2661 sector = em->block_start >> 9;
2663 sector = (em->block_start + extent_offset) >> 9;
2664 disk_io_size = iosize;
2667 block_start = em->block_start;
2668 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2669 block_start = EXTENT_MAP_HOLE;
2670 free_extent_map(em);
2673 /* we've found a hole, just zero and go on */
2674 if (block_start == EXTENT_MAP_HOLE) {
2676 struct extent_state *cached = NULL;
2678 userpage = kmap_atomic(page);
2679 memset(userpage + pg_offset, 0, iosize);
2680 flush_dcache_page(page);
2681 kunmap_atomic(userpage);
2683 set_extent_uptodate(tree, cur, cur + iosize - 1,
2685 unlock_extent_cached(tree, cur, cur + iosize - 1,
2688 pg_offset += iosize;
2691 /* the get_extent function already copied into the page */
2692 if (test_range_bit(tree, cur, cur_end,
2693 EXTENT_UPTODATE, 1, NULL)) {
2694 check_page_uptodate(tree, page);
2695 unlock_extent(tree, cur, cur + iosize - 1);
2697 pg_offset += iosize;
2700 /* we have an inline extent but it didn't get marked up
2701 * to date. Error out
2703 if (block_start == EXTENT_MAP_INLINE) {
2705 unlock_extent(tree, cur, cur + iosize - 1);
2707 pg_offset += iosize;
2712 if (tree->ops && tree->ops->readpage_io_hook) {
2713 ret = tree->ops->readpage_io_hook(page, cur,
2717 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2719 ret = submit_extent_page(READ, tree, page,
2720 sector, disk_io_size, pg_offset,
2722 end_bio_extent_readpage, mirror_num,
2725 BUG_ON(ret == -ENOMEM);
2727 *bio_flags = this_bio_flag;
2732 pg_offset += iosize;
2736 if (!PageError(page))
2737 SetPageUptodate(page);
2743 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2744 get_extent_t *get_extent, int mirror_num)
2746 struct bio *bio = NULL;
2747 unsigned long bio_flags = 0;
2750 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
2753 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
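/*
 * update_nr_written charges nr_written pages against wbc->nr_to_write and,
 * for range_cyclic or whole-file writeback, advances the mapping's
 * writeback_index so the next writeback pass resumes after this page.
 */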
2757 static noinline void update_nr_written(struct page *page,
2758 struct writeback_control *wbc,
2759 unsigned long nr_written)
2761 wbc->nr_to_write -= nr_written;
2762 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2763 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2764 page->mapping->writeback_index = page->index + nr_written;
2768 * the writepage semantics are similar to regular writepage. extent
2769 * records are inserted to lock ranges in the tree, and as dirty areas
2770 * are found, they are marked writeback. Then the lock bits are removed
2771 * and the end_io handler clears the writeback ranges
2773 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2776 struct inode *inode = page->mapping->host;
2777 struct extent_page_data *epd = data;
2778 struct extent_io_tree *tree = epd->tree;
2779 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2781 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2785 u64 last_byte = i_size_read(inode);
2789 struct extent_state *cached_state = NULL;
2790 struct extent_map *em;
2791 struct block_device *bdev;
2794 size_t pg_offset = 0;
2796 loff_t i_size = i_size_read(inode);
2797 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2803 unsigned long nr_written = 0;
2804 bool fill_delalloc = true;
2806 if (wbc->sync_mode == WB_SYNC_ALL)
2807 write_flags = WRITE_SYNC;
2809 write_flags = WRITE;
2811 trace___extent_writepage(page, inode, wbc);
2813 WARN_ON(!PageLocked(page));
2815 ClearPageError(page);
2817 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2818 if (page->index > end_index ||
2819 (page->index == end_index && !pg_offset)) {
2820 page->mapping->a_ops->invalidatepage(page, 0);
2825 if (page->index == end_index) {
2828 userpage = kmap_atomic(page);
2829 memset(userpage + pg_offset, 0,
2830 PAGE_CACHE_SIZE - pg_offset);
2831 kunmap_atomic(userpage);
2832 flush_dcache_page(page);
2836 set_page_extent_mapped(page);
2838 if (!tree->ops || !tree->ops->fill_delalloc)
2839 fill_delalloc = false;
2841 delalloc_start = start;
2844 if (!epd->extent_locked && fill_delalloc) {
2845 u64 delalloc_to_write = 0;
2847 * make sure the wbc mapping index is at least updated
2850 update_nr_written(page, wbc, 0);
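/*
 * Walk the page range, handing each delalloc region found by
 * find_lock_delalloc_range() to the fill_delalloc hook so space is set up
 * (and nr_written / page locking adjusted) before the dirty blocks are
 * mapped and submitted below.
 */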
2852 while (delalloc_end < page_end) {
2853 nr_delalloc = find_lock_delalloc_range(inode, tree,
2858 if (nr_delalloc == 0) {
2859 delalloc_start = delalloc_end + 1;
2862 ret = tree->ops->fill_delalloc(inode, page,
2867 /* File system has been set read-only */
2873 * delalloc_end is already one less than the total
2874 * length, so we don't subtract one from
2877 delalloc_to_write += (delalloc_end - delalloc_start +
2880 delalloc_start = delalloc_end + 1;
2882 if (wbc->nr_to_write < delalloc_to_write) {
2885 if (delalloc_to_write < thresh * 2)
2886 thresh = delalloc_to_write;
2887 wbc->nr_to_write = min_t(u64, delalloc_to_write,
2891 /* did the fill delalloc function already unlock and start
2897 * we've unlocked the page, so we can't update
2898 * the mapping's writeback index, just update
2901 wbc->nr_to_write -= nr_written;
2905 if (tree->ops && tree->ops->writepage_start_hook) {
2906 ret = tree->ops->writepage_start_hook(page, start,
2909 /* Fixup worker will requeue */
2911 wbc->pages_skipped++;
2913 redirty_page_for_writepage(wbc, page);
2914 update_nr_written(page, wbc, nr_written);
2922 * we don't want to touch the inode after unlocking the page,
2923 * so we update the mapping writeback index now
2925 update_nr_written(page, wbc, nr_written + 1);
2928 if (last_byte <= start) {
2929 if (tree->ops && tree->ops->writepage_end_io_hook)
2930 tree->ops->writepage_end_io_hook(page, start,
2935 blocksize = inode->i_sb->s_blocksize;
2937 while (cur <= end) {
2938 if (cur >= last_byte) {
2939 if (tree->ops && tree->ops->writepage_end_io_hook)
2940 tree->ops->writepage_end_io_hook(page, cur,
2944 em = epd->get_extent(inode, page, pg_offset, cur,
2946 if (IS_ERR_OR_NULL(em)) {
2951 extent_offset = cur - em->start;
2952 BUG_ON(extent_map_end(em) <= cur);
2954 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2955 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2956 sector = (em->block_start + extent_offset) >> 9;
2958 block_start = em->block_start;
2959 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2960 free_extent_map(em);
2964 * compressed and inline extents are written through other
2967 if (compressed || block_start == EXTENT_MAP_HOLE ||
2968 block_start == EXTENT_MAP_INLINE) {
2970 * end_io notification does not happen here for
2971 * compressed extents
2973 if (!compressed && tree->ops &&
2974 tree->ops->writepage_end_io_hook)
2975 tree->ops->writepage_end_io_hook(page, cur,
2978 else if (compressed) {
2979 /* we don't want to end_page_writeback on
2980 * a compressed extent. this happens
2987 pg_offset += iosize;
2990 /* leave this out until we have a page_mkwrite call */
2991 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2992 EXTENT_DIRTY, 0, NULL)) {
2994 pg_offset += iosize;
2998 if (tree->ops && tree->ops->writepage_io_hook) {
2999 ret = tree->ops->writepage_io_hook(page, cur,
3007 unsigned long max_nr = end_index + 1;
3009 set_range_writeback(tree, cur, cur + iosize - 1);
3010 if (!PageWriteback(page)) {
3011 printk(KERN_ERR "btrfs warning page %lu not "
3012 "writeback, cur %llu end %llu\n",
3013 page->index, (unsigned long long)cur,
3014 (unsigned long long)end);
3017 ret = submit_extent_page(write_flags, tree, page,
3018 sector, iosize, pg_offset,
3019 bdev, &epd->bio, max_nr,
3020 end_bio_extent_writepage,
3026 pg_offset += iosize;
3031 /* make sure the mapping tag for page dirty gets cleared */
3032 set_page_writeback(page);
3033 end_page_writeback(page);
3039 /* drop our reference on any cached states */
3040 free_extent_state(cached_state);
3044 static int eb_wait(void *word)
3050 static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3052 wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3053 TASK_UNINTERRUPTIBLE);
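/*
 * lock_extent_buffer_for_io takes the tree write lock on the eb (flushing
 * the pending write bio first if we would otherwise block), waits for any
 * writeback already in flight, then converts the DIRTY bit into WRITEBACK
 * while adjusting dirty_metadata_bytes.  Finally every page of the eb is
 * locked; the return value tells the caller whether the buffer actually
 * needs to be written.
 */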
3056 static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3057 struct btrfs_fs_info *fs_info,
3058 struct extent_page_data *epd)
3060 unsigned long i, num_pages;
3064 if (!btrfs_try_tree_write_lock(eb)) {
3066 flush_write_bio(epd);
3067 btrfs_tree_lock(eb);
3070 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3071 btrfs_tree_unlock(eb);
3075 flush_write_bio(epd);
3079 wait_on_extent_buffer_writeback(eb);
3080 btrfs_tree_lock(eb);
3081 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3083 btrfs_tree_unlock(eb);
3088 * We need to do this to prevent races in people who check if the eb is
3089 * under IO since we can end up having no IO bits set for a short period
3092 spin_lock(&eb->refs_lock);
3093 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3094 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3095 spin_unlock(&eb->refs_lock);
3096 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3097 spin_lock(&fs_info->delalloc_lock);
3098 if (fs_info->dirty_metadata_bytes >= eb->len)
3099 fs_info->dirty_metadata_bytes -= eb->len;
3102 spin_unlock(&fs_info->delalloc_lock);
3105 spin_unlock(&eb->refs_lock);
3108 btrfs_tree_unlock(eb);
3113 num_pages = num_extent_pages(eb->start, eb->len);
3114 for (i = 0; i < num_pages; i++) {
3115 struct page *p = extent_buffer_page(eb, i);
3117 if (!trylock_page(p)) {
3119 flush_write_bio(epd);
3129 static void end_extent_buffer_writeback(struct extent_buffer *eb)
3131 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3132 smp_mb__after_clear_bit();
3133 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
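/*
 * Write completion for extent buffer pages: walk the bio_vecs, flag the eb
 * with EXTENT_BUFFER_IOERR on failure, end page writeback on each page, and
 * once io_pages drops to zero clear EXTENT_BUFFER_WRITEBACK and wake any
 * waiters.
 */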
3136 static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3138 int uptodate = err == 0;
3139 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3140 struct extent_buffer *eb;
3144 struct page *page = bvec->bv_page;
3147 eb = (struct extent_buffer *)page->private;
3149 done = atomic_dec_and_test(&eb->io_pages);
3151 if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3152 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3153 ClearPageUptodate(page);
3157 end_page_writeback(page);
3162 end_extent_buffer_writeback(eb);
3163 } while (bvec >= bio->bi_io_vec);
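/*
 * write_one_eb submits every page of the buffer (WRITE or WRITE_SYNC
 * depending on epd->sync_io): each page is cleared dirty, put under
 * writeback and sent through submit_extent_page().  A submission failure
 * marks the eb IOERR and accounts for the pages that will never see a
 * completion, so writeback can still finish.
 */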
3169 static int write_one_eb(struct extent_buffer *eb,
3170 struct btrfs_fs_info *fs_info,
3171 struct writeback_control *wbc,
3172 struct extent_page_data *epd)
3174 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3175 u64 offset = eb->start;
3176 unsigned long i, num_pages;
3177 int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
3180 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3181 num_pages = num_extent_pages(eb->start, eb->len);
3182 atomic_set(&eb->io_pages, num_pages);
3183 for (i = 0; i < num_pages; i++) {
3184 struct page *p = extent_buffer_page(eb, i);
3186 clear_page_dirty_for_io(p);
3187 set_page_writeback(p);
3188 ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3189 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3190 -1, end_bio_extent_buffer_writepage,
3193 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3195 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3196 end_extent_buffer_writeback(eb);
3200 offset += PAGE_CACHE_SIZE;
3201 update_nr_written(p, wbc, 1);
3205 if (unlikely(ret)) {
3206 for (; i < num_pages; i++) {
3207 struct page *p = extent_buffer_page(eb, i);
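/*
 * btree_write_cache_pages is the metadata counterpart of
 * extent_write_cache_pages: it walks the btree inode's dirty pages by tag,
 * resolves each page back to its extent_buffer, takes a reference, and
 * writes whole buffers via lock_extent_buffer_for_io()/write_one_eb().
 */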
3215 int btree_write_cache_pages(struct address_space *mapping,
3216 struct writeback_control *wbc)
3218 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3219 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3220 struct extent_buffer *eb, *prev_eb = NULL;
3221 struct extent_page_data epd = {
3225 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3229 int nr_to_write_done = 0;
3230 struct pagevec pvec;
3233 pgoff_t end; /* Inclusive */
3237 pagevec_init(&pvec, 0);
3238 if (wbc->range_cyclic) {
3239 index = mapping->writeback_index; /* Start from prev offset */
3242 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3243 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3246 if (wbc->sync_mode == WB_SYNC_ALL)
3247 tag = PAGECACHE_TAG_TOWRITE;
3249 tag = PAGECACHE_TAG_DIRTY;
3251 if (wbc->sync_mode == WB_SYNC_ALL)
3252 tag_pages_for_writeback(mapping, index, end);
3253 while (!done && !nr_to_write_done && (index <= end) &&
3254 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3255 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3259 for (i = 0; i < nr_pages; i++) {
3260 struct page *page = pvec.pages[i];
3262 if (!PagePrivate(page))
3265 if (!wbc->range_cyclic && page->index > end) {
3270 eb = (struct extent_buffer *)page->private;
3279 if (!atomic_inc_not_zero(&eb->refs)) {
3285 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3287 free_extent_buffer(eb);
3291 ret = write_one_eb(eb, fs_info, wbc, &epd);
3294 free_extent_buffer(eb);
3297 free_extent_buffer(eb);
3300 * the filesystem may choose to bump up nr_to_write.
3301 * We have to make sure to honor the new nr_to_write
3304 nr_to_write_done = wbc->nr_to_write <= 0;
3306 pagevec_release(&pvec);
3309 if (!scanned && !done) {
3311 * We hit the last page and there is more work to be done: wrap
3312 * back to the start of the file
3318 flush_write_bio(&epd);
3323 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3324 * @mapping: address space structure to write
3325 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3326 * @writepage: function called for each page
3327 * @data: data passed to writepage function
3329 * If a page is already under I/O, write_cache_pages() skips it, even
3330 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3331 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3332 * and msync() need to guarantee that all the data which was dirty at the time
3333 * the call was made get new I/O started against them. If wbc->sync_mode is
3334 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3335 * existing IO to complete.
3337 static int extent_write_cache_pages(struct extent_io_tree *tree,
3338 struct address_space *mapping,
3339 struct writeback_control *wbc,
3340 writepage_t writepage, void *data,
3341 void (*flush_fn)(void *))
3343 struct inode *inode = mapping->host;
3346 int nr_to_write_done = 0;
3347 struct pagevec pvec;
3350 pgoff_t end; /* Inclusive */
3355 * We have to hold onto the inode so that ordered extents can do their
3356 * work when the IO finishes. The alternative to this is failing to add
3357 * an ordered extent if the igrab() fails there and that is a huge pain
3358 * to deal with, so instead just hold onto the inode throughout the
3359 * writepages operation. If it fails here we are freeing up the inode
3360 * anyway and we'd rather not waste our time writing out stuff that is
3361 * going to be truncated anyway.
3366 pagevec_init(&pvec, 0);
3367 if (wbc->range_cyclic) {
3368 index = mapping->writeback_index; /* Start from prev offset */
3371 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3372 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3375 if (wbc->sync_mode == WB_SYNC_ALL)
3376 tag = PAGECACHE_TAG_TOWRITE;
3378 tag = PAGECACHE_TAG_DIRTY;
3380 if (wbc->sync_mode == WB_SYNC_ALL)
3381 tag_pages_for_writeback(mapping, index, end);
3382 while (!done && !nr_to_write_done && (index <= end) &&
3383 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3384 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3388 for (i = 0; i < nr_pages; i++) {
3389 struct page *page = pvec.pages[i];
3392 * At this point we hold neither mapping->tree_lock nor
3393 * lock on the page itself: the page may be truncated or
3394 * invalidated (changing page->mapping to NULL), or even
3395 * swizzled back from swapper_space to tmpfs file
3399 tree->ops->write_cache_pages_lock_hook) {
3400 tree->ops->write_cache_pages_lock_hook(page,
3403 if (!trylock_page(page)) {
3409 if (unlikely(page->mapping != mapping)) {
3414 if (!wbc->range_cyclic && page->index > end) {
3420 if (wbc->sync_mode != WB_SYNC_NONE) {
3421 if (PageWriteback(page))
3423 wait_on_page_writeback(page);
3426 if (PageWriteback(page) ||
3427 !clear_page_dirty_for_io(page)) {
3432 ret = (*writepage)(page, wbc, data);
3434 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3442 * the filesystem may choose to bump up nr_to_write.
3443 * We have to make sure to honor the new nr_to_write
3446 nr_to_write_done = wbc->nr_to_write <= 0;
3448 pagevec_release(&pvec);
3451 if (!scanned && !done) {
3453 * We hit the last page and there is more work to be done: wrap
3454 * back to the start of the file
3460 btrfs_add_delayed_iput(inode);
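/*
 * flush_epd_write_bio submits whatever bio is still batched in the
 * extent_page_data, using WRITE_SYNC when epd->sync_io is set.
 */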
3464 static void flush_epd_write_bio(struct extent_page_data *epd)
3473 ret = submit_one_bio(rw, epd->bio, 0, 0);
3474 BUG_ON(ret < 0); /* -ENOMEM */
3479 static noinline void flush_write_bio(void *data)
3481 struct extent_page_data *epd = data;
3482 flush_epd_write_bio(epd);
3485 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3486 get_extent_t *get_extent,
3487 struct writeback_control *wbc)
3490 struct extent_page_data epd = {
3493 .get_extent = get_extent,
3495 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3498 ret = __extent_writepage(page, wbc, &epd);
3500 flush_epd_write_bio(&epd);
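/*
 * extent_write_locked_range writes out an already-locked file range page by
 * page: pages that are still dirty go through __extent_writepage(), pages
 * that are not are simply reported to the writepage_end_io_hook.
 */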
3504 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3505 u64 start, u64 end, get_extent_t *get_extent,
3509 struct address_space *mapping = inode->i_mapping;
3511 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3514 struct extent_page_data epd = {
3517 .get_extent = get_extent,
3519 .sync_io = mode == WB_SYNC_ALL,
3521 struct writeback_control wbc_writepages = {
3523 .nr_to_write = nr_pages * 2,
3524 .range_start = start,
3525 .range_end = end + 1,
3528 while (start <= end) {
3529 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3530 if (clear_page_dirty_for_io(page))
3531 ret = __extent_writepage(page, &wbc_writepages, &epd);
3533 if (tree->ops && tree->ops->writepage_end_io_hook)
3534 tree->ops->writepage_end_io_hook(page, start,
3535 start + PAGE_CACHE_SIZE - 1,
3539 page_cache_release(page);
3540 start += PAGE_CACHE_SIZE;
3543 flush_epd_write_bio(&epd);
3547 int extent_writepages(struct extent_io_tree *tree,
3548 struct address_space *mapping,
3549 get_extent_t *get_extent,
3550 struct writeback_control *wbc)
3553 struct extent_page_data epd = {
3556 .get_extent = get_extent,
3558 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3561 ret = extent_write_cache_pages(tree, mapping, wbc,
3562 __extent_writepage, &epd,
3564 flush_epd_write_bio(&epd);
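/*
 * extent_readpages services ->readpages: pages are added to the page cache
 * LRU and collected into a small on-stack pool, then read in batches with
 * __extent_read_full_page() so a single bio can be shared across them.
 */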
3568 int extent_readpages(struct extent_io_tree *tree,
3569 struct address_space *mapping,
3570 struct list_head *pages, unsigned nr_pages,
3571 get_extent_t get_extent)
3573 struct bio *bio = NULL;
3575 unsigned long bio_flags = 0;
3576 struct page *pagepool[16];
3581 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3582 page = list_entry(pages->prev, struct page, lru);
3584 prefetchw(&page->flags);
3585 list_del(&page->lru);
3586 if (add_to_page_cache_lru(page, mapping,
3587 page->index, GFP_NOFS)) {
3588 page_cache_release(page);
3592 pagepool[nr++] = page;
3593 if (nr < ARRAY_SIZE(pagepool))
3595 for (i = 0; i < nr; i++) {
3596 __extent_read_full_page(tree, pagepool[i], get_extent,
3597 &bio, 0, &bio_flags);
3598 page_cache_release(pagepool[i]);
3602 for (i = 0; i < nr; i++) {
3603 __extent_read_full_page(tree, pagepool[i], get_extent,
3604 &bio, 0, &bio_flags);
3605 page_cache_release(pagepool[i]);
3608 BUG_ON(!list_empty(pages));
3610 return submit_one_bio(READ, bio, 0, bio_flags);
3615 * basic invalidatepage code, this waits on any locked or writeback
3616 * ranges corresponding to the page, and then deletes any extent state
3617 * records from the tree
3619 int extent_invalidatepage(struct extent_io_tree *tree,
3620 struct page *page, unsigned long offset)
3622 struct extent_state *cached_state = NULL;
3623 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
3624 u64 end = start + PAGE_CACHE_SIZE - 1;
3625 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3627 start += (offset + blocksize - 1) & ~(blocksize - 1);
3631 lock_extent_bits(tree, start, end, 0, &cached_state);
3632 wait_on_page_writeback(page);
3633 clear_extent_bit(tree, start, end,
3634 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3635 EXTENT_DO_ACCOUNTING,
3636 1, 1, &cached_state, GFP_NOFS);
3641 * a helper for releasepage, this tests for areas of the page that
3642 * are locked or under IO and drops the related state bits if it is safe
3645 int try_release_extent_state(struct extent_map_tree *map,
3646 struct extent_io_tree *tree, struct page *page,
3649 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3650 u64 end = start + PAGE_CACHE_SIZE - 1;
3653 if (test_range_bit(tree, start, end,
3654 EXTENT_IOBITS, 0, NULL))
3657 if ((mask & GFP_NOFS) == GFP_NOFS)
3660 * at this point we can safely clear everything except the
3661 * locked bit and the nodatasum bit
3663 ret = clear_extent_bit(tree, start, end,
3664 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3667 /* if clear_extent_bit failed for enomem reasons,
3668 * we can't allow the release to continue.
3679 * a helper for releasepage. As long as there are no locked extents
3680 * in the range corresponding to the page, both state records and extent
3681 * map records are removed
3683 int try_release_extent_mapping(struct extent_map_tree *map,
3684 struct extent_io_tree *tree, struct page *page,
3687 struct extent_map *em;
3688 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3689 u64 end = start + PAGE_CACHE_SIZE - 1;
3691 if ((mask & __GFP_WAIT) &&
3692 page->mapping->host->i_size > 16 * 1024 * 1024) {
3694 while (start <= end) {
3695 len = end - start + 1;
3696 write_lock(&map->lock);
3697 em = lookup_extent_mapping(map, start, len);
3699 write_unlock(&map->lock);
3702 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3703 em->start != start) {
3704 write_unlock(&map->lock);
3705 free_extent_map(em);
3708 if (!test_range_bit(tree, em->start,
3709 extent_map_end(em) - 1,
3710 EXTENT_LOCKED | EXTENT_WRITEBACK,
3712 remove_extent_mapping(map, em);
3713 /* once for the rb tree */
3714 free_extent_map(em);
3716 start = extent_map_end(em);
3717 write_unlock(&map->lock);
3720 free_extent_map(em);
3723 return try_release_extent_state(map, tree, page, mask);
3727 * helper function for fiemap, which doesn't want to see any holes.
3728 * This maps until we find something past 'last'
3730 static struct extent_map *get_extent_skip_holes(struct inode *inode,
3733 get_extent_t *get_extent)
3735 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3736 struct extent_map *em;
3743 len = last - offset;
3746 len = (len + sectorsize - 1) & ~(sectorsize - 1);
3747 em = get_extent(inode, NULL, 0, offset, len, 0);
3748 if (IS_ERR_OR_NULL(em))
3751 /* if this isn't a hole return it */
3752 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3753 em->block_start != EXTENT_MAP_HOLE) {
3757 /* this is a hole, advance to the next extent */
3758 offset = extent_map_end(em);
3759 free_extent_map(em);
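/*
 * extent_fiemap: look up the last file extent item to decide how far i_size
 * can be trusted, then walk the range with get_extent_skip_holes(),
 * translating each extent_map into a fiemap entry (inline, delalloc,
 * encoded and last-extent flags included).
 */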
3766 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3767 __u64 start, __u64 len, get_extent_t *get_extent)
3771 u64 max = start + len;
3775 u64 last_for_get_extent = 0;
3777 u64 isize = i_size_read(inode);
3778 struct btrfs_key found_key;
3779 struct extent_map *em = NULL;
3780 struct extent_state *cached_state = NULL;
3781 struct btrfs_path *path;
3782 struct btrfs_file_extent_item *item;
3787 unsigned long emflags;
3792 path = btrfs_alloc_path();
3795 path->leave_spinning = 1;
3797 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3798 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3801 * lookup the last file extent. We're not using i_size here
3802 * because there might be preallocation past i_size
3804 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
3805 path, btrfs_ino(inode), -1, 0);
3807 btrfs_free_path(path);
3812 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3813 struct btrfs_file_extent_item);
3814 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3815 found_type = btrfs_key_type(&found_key);
3817 /* No extents, but there might be delalloc bits */
3818 if (found_key.objectid != btrfs_ino(inode) ||
3819 found_type != BTRFS_EXTENT_DATA_KEY) {
3820 /* have to trust i_size as the end */
3822 last_for_get_extent = isize;
3825 * remember the start of the last extent. There are a
3826 * bunch of different factors that go into the length of the
3827 * extent, so it's much less complex to remember where it started
3829 last = found_key.offset;
3830 last_for_get_extent = last + 1;
3832 btrfs_free_path(path);
3835 * we might have some extents allocated but more delalloc past those
3836 * extents. so, we trust isize unless the start of the last extent is
3841 last_for_get_extent = isize;
3844 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3847 em = get_extent_skip_holes(inode, start, last_for_get_extent,
3857 u64 offset_in_extent;
3859 /* break if the extent we found is outside the range */
3860 if (em->start >= max || extent_map_end(em) < off)
3864 * get_extent may return an extent that starts before our
3865 * requested range. We have to make sure the ranges
3866 * we return to fiemap always move forward and don't
3867 * overlap, so adjust the offsets here
3869 em_start = max(em->start, off);
3872 * record the offset from the start of the extent
3873 * for adjusting the disk offset below
3875 offset_in_extent = em_start - em->start;
3876 em_end = extent_map_end(em);
3877 em_len = em_end - em_start;
3878 emflags = em->flags;
3883 * bump off for our next call to get_extent
3885 off = extent_map_end(em);
3889 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
3891 flags |= FIEMAP_EXTENT_LAST;
3892 } else if (em->block_start == EXTENT_MAP_INLINE) {
3893 flags |= (FIEMAP_EXTENT_DATA_INLINE |
3894 FIEMAP_EXTENT_NOT_ALIGNED);
3895 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
3896 flags |= (FIEMAP_EXTENT_DELALLOC |
3897 FIEMAP_EXTENT_UNKNOWN);
3899 disko = em->block_start + offset_in_extent;
3901 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3902 flags |= FIEMAP_EXTENT_ENCODED;
3904 free_extent_map(em);
3906 if ((em_start >= last) || em_len == (u64)-1 ||
3907 (last == (u64)-1 && isize <= em_end)) {
3908 flags |= FIEMAP_EXTENT_LAST;
3912 /* now scan forward to see if this is really the last extent. */
3913 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3920 flags |= FIEMAP_EXTENT_LAST;
3923 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3929 free_extent_map(em);
3931 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3932 &cached_state, GFP_NOFS);
3936 inline struct page *extent_buffer_page(struct extent_buffer *eb,
3939 return eb->pages[i];
3942 inline unsigned long num_extent_pages(u64 start, u64 len)
3944 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3945 (start >> PAGE_CACHE_SHIFT);
3948 static void __free_extent_buffer(struct extent_buffer *eb)
3951 unsigned long flags;
3952 spin_lock_irqsave(&leak_lock, flags);
3953 list_del(&eb->leak_list);
3954 spin_unlock_irqrestore(&leak_lock, flags);
3956 if (eb->pages && eb->pages != eb->inline_pages)
3958 kmem_cache_free(extent_buffer_cache, eb);
3961 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3966 struct extent_buffer *eb = NULL;
3968 unsigned long flags;
3971 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
3978 rwlock_init(&eb->lock);
3979 atomic_set(&eb->write_locks, 0);
3980 atomic_set(&eb->read_locks, 0);
3981 atomic_set(&eb->blocking_readers, 0);
3982 atomic_set(&eb->blocking_writers, 0);
3983 atomic_set(&eb->spinning_readers, 0);
3984 atomic_set(&eb->spinning_writers, 0);
3985 eb->lock_nested = 0;
3986 init_waitqueue_head(&eb->write_lock_wq);
3987 init_waitqueue_head(&eb->read_lock_wq);
3990 spin_lock_irqsave(&leak_lock, flags);
3991 list_add(&eb->leak_list, &buffers);
3992 spin_unlock_irqrestore(&leak_lock, flags);
3994 spin_lock_init(&eb->refs_lock);
3995 atomic_set(&eb->refs, 1);
3996 atomic_set(&eb->io_pages, 0);
3998 if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
3999 struct page **pages;
4000 int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
/* room for num_pages page pointers, not num_pages bytes */
4002 pages = kzalloc(num_pages * sizeof(struct page *), mask);
4004 __free_extent_buffer(eb);
4009 eb->pages = eb->inline_pages;
4015 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4019 struct extent_buffer *new;
4020 unsigned long num_pages = num_extent_pages(src->start, src->len);
4022 new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
4026 for (i = 0; i < num_pages; i++) {
4027 p = alloc_page(GFP_ATOMIC);
4029 attach_extent_buffer_page(new, p);
4030 WARN_ON(PageDirty(p));
4035 copy_extent_buffer(new, src, 0, 0, src->len);
4036 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4037 set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4042 struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
4044 struct extent_buffer *eb;
4045 unsigned long num_pages = num_extent_pages(0, len);
4048 eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
4052 for (i = 0; i < num_pages; i++) {
4053 eb->pages[i] = alloc_page(GFP_ATOMIC);
4057 set_extent_buffer_uptodate(eb);
4058 btrfs_set_header_nritems(eb, 0);
4059 set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
/* free the pages we did allocate, including pages[0] */
4063 for (; i > 0; i--)
4064 __free_page(eb->pages[i - 1]);
4065 __free_extent_buffer(eb);
4069 static int extent_buffer_under_io(struct extent_buffer *eb)
4071 return (atomic_read(&eb->io_pages) ||
4072 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4073 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4077 * Helper for releasing extent buffer page.
4079 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
4080 unsigned long start_idx)
4082 unsigned long index;
4083 unsigned long num_pages;
4085 int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4087 BUG_ON(extent_buffer_under_io(eb));
4089 num_pages = num_extent_pages(eb->start, eb->len);
4090 index = start_idx + num_pages;
4091 if (start_idx >= index)
4096 page = extent_buffer_page(eb, index);
4097 if (page && mapped) {
4098 spin_lock(&page->mapping->private_lock);
4100 * We do this since we'll remove the pages after we've
4101 * removed the eb from the radix tree, so we could race
4102 * and have this page now attached to the new eb. So
4103 * only clear page_private if it's still connected to
4106 if (PagePrivate(page) &&
4107 page->private == (unsigned long)eb) {
4108 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4109 BUG_ON(PageDirty(page));
4110 BUG_ON(PageWriteback(page));
4112 * We need to make sure we haven't been attached
4115 ClearPagePrivate(page);
4116 set_page_private(page, 0);
4117 /* One for the page private */
4118 page_cache_release(page);
4120 spin_unlock(&page->mapping->private_lock);
4124 /* One for when we allocated the page */
4125 page_cache_release(page);
4127 } while (index != start_idx);
4131 * Helper for releasing the extent buffer.
4133 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4135 btrfs_release_extent_buffer_page(eb, 0);
4136 __free_extent_buffer(eb);
4139 static void check_buffer_tree_ref(struct extent_buffer *eb)
4141 /* the ref bit is tricky. We have to make sure it is set
4142 * if we have the buffer dirty. Otherwise the
4143 * code to free a buffer can end up dropping a dirty
4146 * Once the ref bit is set, it won't go away while the
4147 * buffer is dirty or in writeback, and it also won't
4148 * go away while we have the reference count on the
4151 * We can't just set the ref bit without bumping the
4152 * ref on the eb because free_extent_buffer might
4153 * see the ref bit and try to clear it. If this happens
4154 * free_extent_buffer might end up dropping our original
4155 * ref by mistake and freeing the page before we are able
4156 * to add one more ref.
4158 * So bump the ref count first, then set the bit. If someone
4159 * beat us to it, drop the ref we added.
4161 spin_lock(&eb->refs_lock);
4162 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4163 atomic_inc(&eb->refs);
4164 spin_unlock(&eb->refs_lock);
4167 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4169 unsigned long num_pages, i;
4171 check_buffer_tree_ref(eb);
4173 num_pages = num_extent_pages(eb->start, eb->len);
4174 for (i = 0; i < num_pages; i++) {
4175 struct page *p = extent_buffer_page(eb, i);
4176 mark_page_accessed(p);
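/*
 * alloc_extent_buffer returns the extent_buffer covering [start, start+len):
 * a radix-tree hit just gains a reference; otherwise a new eb is allocated,
 * its pages are found or created in the btree mapping, and the eb is
 * inserted into the radix tree (falling back to an existing buffer if we
 * lose the insertion race).
 */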
4180 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
4181 u64 start, unsigned long len)
4183 unsigned long num_pages = num_extent_pages(start, len);
4185 unsigned long index = start >> PAGE_CACHE_SHIFT;
4186 struct extent_buffer *eb;
4187 struct extent_buffer *exists = NULL;
4189 struct address_space *mapping = tree->mapping;
4194 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4195 if (eb && atomic_inc_not_zero(&eb->refs)) {
4197 mark_extent_buffer_accessed(eb);
4202 eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
4206 for (i = 0; i < num_pages; i++, index++) {
4207 p = find_or_create_page(mapping, index, GFP_NOFS);
4213 spin_lock(&mapping->private_lock);
4214 if (PagePrivate(p)) {
4216 * We could have already allocated an eb for this page
4217 * and attached one so let's see if we can get a ref on
4218 * the existing eb, and if we can we know it's good and
4219 * we can just return that one, else we know we can just
4220 * overwrite page->private.
4222 exists = (struct extent_buffer *)p->private;
4223 if (atomic_inc_not_zero(&exists->refs)) {
4224 spin_unlock(&mapping->private_lock);
4226 page_cache_release(p);
4227 mark_extent_buffer_accessed(exists);
4232 * Do this so attach doesn't complain and we need to
4233 * drop the ref the old guy had.
4235 ClearPagePrivate(p);
4236 WARN_ON(PageDirty(p));
4237 page_cache_release(p);
4239 attach_extent_buffer_page(eb, p);
4240 spin_unlock(&mapping->private_lock);
4241 WARN_ON(PageDirty(p));
4242 mark_page_accessed(p);
4244 if (!PageUptodate(p))
4248 * see below about how we avoid a nasty race with release page
4249 * and why we unlock later
4253 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4255 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4259 spin_lock(&tree->buffer_lock);
4260 ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
4261 if (ret == -EEXIST) {
4262 exists = radix_tree_lookup(&tree->buffer,
4263 start >> PAGE_CACHE_SHIFT);
4264 if (!atomic_inc_not_zero(&exists->refs)) {
4265 spin_unlock(&tree->buffer_lock);
4266 radix_tree_preload_end();
4270 spin_unlock(&tree->buffer_lock);
4271 radix_tree_preload_end();
4272 mark_extent_buffer_accessed(exists);
4275 /* add one reference for the tree */
4276 check_buffer_tree_ref(eb);
4277 spin_unlock(&tree->buffer_lock);
4278 radix_tree_preload_end();
4281 * there is a race where release page may have
4282 * tried to find this extent buffer in the radix
4283 * but failed. It will tell the VM it is safe to
4284 * reclaim the page, and it will clear the page private bit.
4285 * We must make sure to set the page private bit properly
4286 * after the extent buffer is in the radix tree so
4287 * it doesn't get lost
4289 SetPageChecked(eb->pages[0]);
4290 for (i = 1; i < num_pages; i++) {
4291 p = extent_buffer_page(eb, i);
4292 ClearPageChecked(p);
4295 unlock_page(eb->pages[0]);
4299 for (i = 0; i < num_pages; i++) {
4301 unlock_page(eb->pages[i]);
4304 WARN_ON(!atomic_dec_and_test(&eb->refs));
4305 btrfs_release_extent_buffer(eb);
4309 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
4310 u64 start, unsigned long len)
4312 struct extent_buffer *eb;
4315 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4316 if (eb && atomic_inc_not_zero(&eb->refs)) {
4318 mark_extent_buffer_accessed(eb);
4326 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4328 struct extent_buffer *eb =
4329 container_of(head, struct extent_buffer, rcu_head);
4331 __free_extent_buffer(eb);
4334 /* Expects to have eb->refs_lock already held */
4335 static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
4337 WARN_ON(atomic_read(&eb->refs) == 0);
4338 if (atomic_dec_and_test(&eb->refs)) {
4339 if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
4340 spin_unlock(&eb->refs_lock);
4342 struct extent_io_tree *tree = eb->tree;
4344 spin_unlock(&eb->refs_lock);
4346 spin_lock(&tree->buffer_lock);
4347 radix_tree_delete(&tree->buffer,
4348 eb->start >> PAGE_CACHE_SHIFT);
4349 spin_unlock(&tree->buffer_lock);
4352 /* Should be safe to release our pages at this point */
4353 btrfs_release_extent_buffer_page(eb, 0);
4355 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4358 spin_unlock(&eb->refs_lock);
4363 void free_extent_buffer(struct extent_buffer *eb)
4368 spin_lock(&eb->refs_lock);
4369 if (atomic_read(&eb->refs) == 2 &&
4370 test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
4371 atomic_dec(&eb->refs);
4373 if (atomic_read(&eb->refs) == 2 &&
4374 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4375 !extent_buffer_under_io(eb) &&
4376 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4377 atomic_dec(&eb->refs);
4380 * I know this is terrible, but it's temporary until we stop tracking
4381 * the uptodate bits and such for the extent buffers.
4383 release_extent_buffer(eb, GFP_ATOMIC);
4386 void free_extent_buffer_stale(struct extent_buffer *eb)
4391 spin_lock(&eb->refs_lock);
4392 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4394 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4395 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4396 atomic_dec(&eb->refs);
4397 release_extent_buffer(eb, GFP_NOFS);
4400 void clear_extent_buffer_dirty(struct extent_buffer *eb)
4403 unsigned long num_pages;
4406 num_pages = num_extent_pages(eb->start, eb->len);
4408 for (i = 0; i < num_pages; i++) {
4409 page = extent_buffer_page(eb, i);
4410 if (!PageDirty(page))
4414 WARN_ON(!PagePrivate(page));
4416 clear_page_dirty_for_io(page);
4417 spin_lock_irq(&page->mapping->tree_lock);
4418 if (!PageDirty(page)) {
4419 radix_tree_tag_clear(&page->mapping->page_tree,
4421 PAGECACHE_TAG_DIRTY);
4423 spin_unlock_irq(&page->mapping->tree_lock);
4424 ClearPageError(page);
4427 WARN_ON(atomic_read(&eb->refs) == 0);
4430 int set_extent_buffer_dirty(struct extent_buffer *eb)
4433 unsigned long num_pages;
4436 check_buffer_tree_ref(eb);
4438 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4440 num_pages = num_extent_pages(eb->start, eb->len);
4441 WARN_ON(atomic_read(&eb->refs) == 0);
4442 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4444 for (i = 0; i < num_pages; i++)
4445 set_page_dirty(extent_buffer_page(eb, i));
4449 static int range_straddles_pages(u64 start, u64 len)
4451 if (len < PAGE_CACHE_SIZE)
4453 if (start & (PAGE_CACHE_SIZE - 1))
4455 if ((start + len) & (PAGE_CACHE_SIZE - 1))
4460 int clear_extent_buffer_uptodate(struct extent_buffer *eb)
4464 unsigned long num_pages;
4466 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4467 num_pages = num_extent_pages(eb->start, eb->len);
4468 for (i = 0; i < num_pages; i++) {
4469 page = extent_buffer_page(eb, i);
4471 ClearPageUptodate(page);
4476 int set_extent_buffer_uptodate(struct extent_buffer *eb)
4480 unsigned long num_pages;
4482 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4483 num_pages = num_extent_pages(eb->start, eb->len);
4484 for (i = 0; i < num_pages; i++) {
4485 page = extent_buffer_page(eb, i);
4486 SetPageUptodate(page);
4491 int extent_range_uptodate(struct extent_io_tree *tree,
4496 int pg_uptodate = 1;
4498 unsigned long index;
4500 if (range_straddles_pages(start, end - start + 1)) {
4501 ret = test_range_bit(tree, start, end,
4502 EXTENT_UPTODATE, 1, NULL);
4506 while (start <= end) {
4507 index = start >> PAGE_CACHE_SHIFT;
4508 page = find_get_page(tree->mapping, index);
4511 uptodate = PageUptodate(page);
4512 page_cache_release(page);
4517 start += PAGE_CACHE_SIZE;
4522 int extent_buffer_uptodate(struct extent_buffer *eb)
4524 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
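/*
 * read_extent_buffer_pages reads in any pages of the eb that are not yet
 * uptodate.  WAIT_NONE only trylocks pages and never sleeps; WAIT_COMPLETE
 * additionally waits for the reads to finish, failing if any page did not
 * become uptodate.
 */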
4527 int read_extent_buffer_pages(struct extent_io_tree *tree,
4528 struct extent_buffer *eb, u64 start, int wait,
4529 get_extent_t *get_extent, int mirror_num)
4532 unsigned long start_i;
4536 int locked_pages = 0;
4537 int all_uptodate = 1;
4538 unsigned long num_pages;
4539 unsigned long num_reads = 0;
4540 struct bio *bio = NULL;
4541 unsigned long bio_flags = 0;
4543 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4547 WARN_ON(start < eb->start);
4548 start_i = (start >> PAGE_CACHE_SHIFT) -
4549 (eb->start >> PAGE_CACHE_SHIFT);
4554 num_pages = num_extent_pages(eb->start, eb->len);
4555 for (i = start_i; i < num_pages; i++) {
4556 page = extent_buffer_page(eb, i);
4557 if (wait == WAIT_NONE) {
4558 if (!trylock_page(page))
4564 if (!PageUptodate(page)) {
4571 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4575 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
4576 eb->read_mirror = 0;
4577 atomic_set(&eb->io_pages, num_reads);
4578 for (i = start_i; i < num_pages; i++) {
4579 page = extent_buffer_page(eb, i);
4580 if (!PageUptodate(page)) {
4581 ClearPageError(page);
4582 err = __extent_read_full_page(tree, page,
4584 mirror_num, &bio_flags);
4593 err = submit_one_bio(READ, bio, mirror_num, bio_flags);
4598 if (ret || wait != WAIT_COMPLETE)
4601 for (i = start_i; i < num_pages; i++) {
4602 page = extent_buffer_page(eb, i);
4603 wait_on_page_locked(page);
4604 if (!PageUptodate(page))
4612 while (locked_pages > 0) {
4613 page = extent_buffer_page(eb, i);
4621 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4622 unsigned long start,
4629 char *dst = (char *)dstv;
4630 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4631 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4633 WARN_ON(start > eb->len);
4634 WARN_ON(start + len > eb->start + eb->len);
4636 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4639 page = extent_buffer_page(eb, i);
4641 cur = min(len, (PAGE_CACHE_SIZE - offset));
4642 kaddr = page_address(page);
4643 memcpy(dst, kaddr + offset, cur);
4652 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
4653 unsigned long min_len, char **map,
4654 unsigned long *map_start,
4655 unsigned long *map_len)
4657 size_t offset = start & (PAGE_CACHE_SIZE - 1);
4660 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4661 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4662 unsigned long end_i = (start_offset + start + min_len - 1) >>
4669 offset = start_offset;
4673 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4676 if (start + min_len > eb->len) {
4677 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4678 "wanted %lu %lu\n", (unsigned long long)eb->start,
4679 eb->len, start, min_len);
4684 p = extent_buffer_page(eb, i);
4685 kaddr = page_address(p);
4686 *map = kaddr + offset;
4687 *map_len = PAGE_CACHE_SIZE - offset;
4691 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4692 unsigned long start,
4699 char *ptr = (char *)ptrv;
4700 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4701 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4704 WARN_ON(start > eb->len);
4705 WARN_ON(start + len > eb->start + eb->len);
4707 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4710 page = extent_buffer_page(eb, i);
4712 cur = min(len, (PAGE_CACHE_SIZE - offset));
4714 kaddr = page_address(page);
4715 ret = memcmp(ptr, kaddr + offset, cur);
4727 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
4728 unsigned long start, unsigned long len)
4734 char *src = (char *)srcv;
4735 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4736 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4738 WARN_ON(start > eb->len);
4739 WARN_ON(start + len > eb->start + eb->len);
4741 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4744 page = extent_buffer_page(eb, i);
4745 WARN_ON(!PageUptodate(page));
4747 cur = min(len, PAGE_CACHE_SIZE - offset);
4748 kaddr = page_address(page);
4749 memcpy(kaddr + offset, src, cur);
4758 void memset_extent_buffer(struct extent_buffer *eb, char c,
4759 unsigned long start, unsigned long len)
4765 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4766 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4768 WARN_ON(start > eb->len);
4769 WARN_ON(start + len > eb->start + eb->len);
4771 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4774 page = extent_buffer_page(eb, i);
4775 WARN_ON(!PageUptodate(page));
4777 cur = min(len, PAGE_CACHE_SIZE - offset);
4778 kaddr = page_address(page);
4779 memset(kaddr + offset, c, cur);
4787 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
4788 unsigned long dst_offset, unsigned long src_offset,
4791 u64 dst_len = dst->len;
4796 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4797 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4799 WARN_ON(src->len != dst_len);
4801 offset = (start_offset + dst_offset) &
4802 ((unsigned long)PAGE_CACHE_SIZE - 1);
4805 page = extent_buffer_page(dst, i);
4806 WARN_ON(!PageUptodate(page));
4808 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
4810 kaddr = page_address(page);
4811 read_extent_buffer(src, kaddr + offset, src_offset, cur);
4820 static void move_pages(struct page *dst_page, struct page *src_page,
4821 unsigned long dst_off, unsigned long src_off,
4824 char *dst_kaddr = page_address(dst_page);
4825 if (dst_page == src_page) {
4826 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
4828 char *src_kaddr = page_address(src_page);
4829 char *p = dst_kaddr + dst_off + len;
4830 char *s = src_kaddr + src_off + len;
4837 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4839 unsigned long distance = (src > dst) ? src - dst : dst - src;
4840 return distance < len;
4843 static void copy_pages(struct page *dst_page, struct page *src_page,
4844 unsigned long dst_off, unsigned long src_off,
4847 char *dst_kaddr = page_address(dst_page);
4849 int must_memmove = 0;
4851 if (dst_page != src_page) {
4852 src_kaddr = page_address(src_page);
4854 src_kaddr = dst_kaddr;
4855 if (areas_overlap(src_off, dst_off, len))
4860 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
4862 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
4865 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4866 unsigned long src_offset, unsigned long len)
4869 size_t dst_off_in_page;
4870 size_t src_off_in_page;
4871 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4872 unsigned long dst_i;
4873 unsigned long src_i;
4875 if (src_offset + len > dst->len) {
4876 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4877 "len %lu dst len %lu\n", src_offset, len, dst->len);
4880 if (dst_offset + len > dst->len) {
4881 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4882 "len %lu dst len %lu\n", dst_offset, len, dst->len);
4887 dst_off_in_page = (start_offset + dst_offset) &
4888 ((unsigned long)PAGE_CACHE_SIZE - 1);
4889 src_off_in_page = (start_offset + src_offset) &
4890 ((unsigned long)PAGE_CACHE_SIZE - 1);
4892 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4893 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
4895 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
4897 cur = min_t(unsigned long, cur,
4898 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
4900 copy_pages(extent_buffer_page(dst, dst_i),
4901 extent_buffer_page(dst, src_i),
4902 dst_off_in_page, src_off_in_page, cur);
4910 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4911 unsigned long src_offset, unsigned long len)
4914 size_t dst_off_in_page;
4915 size_t src_off_in_page;
4916 unsigned long dst_end = dst_offset + len - 1;
4917 unsigned long src_end = src_offset + len - 1;
4918 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4919 unsigned long dst_i;
4920 unsigned long src_i;
4922 if (src_offset + len > dst->len) {
4923 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4924 "len %lu len %lu\n", src_offset, len, dst->len);
4927 if (dst_offset + len > dst->len) {
4928 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4929 "len %lu len %lu\n", dst_offset, len, dst->len);
4932 if (dst_offset < src_offset) {
4933 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4937 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
4938 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
4940 dst_off_in_page = (start_offset + dst_end) &
4941 ((unsigned long)PAGE_CACHE_SIZE - 1);
4942 src_off_in_page = (start_offset + src_end) &
4943 ((unsigned long)PAGE_CACHE_SIZE - 1);
4945 cur = min_t(unsigned long, len, src_off_in_page + 1);
4946 cur = min(cur, dst_off_in_page + 1);
4947 move_pages(extent_buffer_page(dst, dst_i),
4948 extent_buffer_page(dst, src_i),
4949 dst_off_in_page - cur + 1,
4950 src_off_in_page - cur + 1, cur);
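/*
 * try_release_extent_buffer is called from releasepage: it is only safe to
 * free the eb when the tree reference is the sole remaining one and the
 * buffer is not under IO, in which case the TREE_REF bit is dropped and
 * release_extent_buffer() tears it down.
 */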
4958 int try_release_extent_buffer(struct page *page, gfp_t mask)
4960 struct extent_buffer *eb;
4963 * We need to make sure nobody is attaching this page to an eb right
4966 spin_lock(&page->mapping->private_lock);
4967 if (!PagePrivate(page)) {
4968 spin_unlock(&page->mapping->private_lock);
4972 eb = (struct extent_buffer *)page->private;
4976 * This is a little awful but should be ok, we need to make sure that
4977 * the eb doesn't disappear out from under us while we're looking at
4980 spin_lock(&eb->refs_lock);
4981 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4982 spin_unlock(&eb->refs_lock);
4983 spin_unlock(&page->mapping->private_lock);
4986 spin_unlock(&page->mapping->private_lock);
4988 if ((mask & GFP_NOFS) == GFP_NOFS)
4992 * If tree ref isn't set then we know the ref on this eb is a real ref,
4993 * so just return, this page will likely be freed soon anyway.
4995 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4996 spin_unlock(&eb->refs_lock);
5000 return release_extent_buffer(eb, mask);