1 #include <linux/bitops.h>
2 #include <linux/slab.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include "extent_map.h"
15 /* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				       unsigned long extra_flags,
				       void (*ctor)(void *, struct kmem_cache *,
						    unsigned long));
21 static struct kmem_cache *extent_map_cache;
22 static struct kmem_cache *extent_state_cache;
23 static struct kmem_cache *extent_buffer_cache;
25 static LIST_HEAD(buffers);
26 static LIST_HEAD(states);
static DEFINE_SPINLOCK(state_lock);
29 #define BUFFER_LRU_MAX 64
/* mirrors the first members of extent_map and extent_state */
struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};
void __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	extent_state_cache = btrfs_cache_create("extent_state",
					      sizeof(struct extent_state), 0,
					      NULL);
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
					      sizeof(struct extent_buffer), 0,
					      NULL);
}
51 void __exit extent_map_exit(void)
53 struct extent_state *state;
55 while (!list_empty(&states)) {
56 state = list_entry(states.next, struct extent_state, list);
57 printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
58 list_del(&state->list);
59 kmem_cache_free(extent_state_cache, state);
64 kmem_cache_destroy(extent_map_cache);
65 if (extent_state_cache)
66 kmem_cache_destroy(extent_state_cache);
67 if (extent_buffer_cache)
68 kmem_cache_destroy(extent_buffer_cache);
71 void extent_map_tree_init(struct extent_map_tree *tree,
72 struct address_space *mapping, gfp_t mask)
74 tree->map.rb_node = NULL;
75 tree->state.rb_node = NULL;
77 rwlock_init(&tree->lock);
78 spin_lock_init(&tree->lru_lock);
79 tree->mapping = mapping;
80 INIT_LIST_HEAD(&tree->buffer_lru);
83 EXPORT_SYMBOL(extent_map_tree_init);
85 void extent_map_tree_empty_lru(struct extent_map_tree *tree)
87 struct extent_buffer *eb;
88 while(!list_empty(&tree->buffer_lru)) {
89 eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
92 free_extent_buffer(eb);
95 EXPORT_SYMBOL(extent_map_tree_empty_lru);
97 struct extent_map *alloc_extent_map(gfp_t mask)
99 struct extent_map *em;
100 em = kmem_cache_alloc(extent_map_cache, mask);
101 if (!em || IS_ERR(em))
104 atomic_set(&em->refs, 1);
107 EXPORT_SYMBOL(alloc_extent_map);
109 void free_extent_map(struct extent_map *em)
113 if (atomic_dec_and_test(&em->refs)) {
114 WARN_ON(em->in_tree);
115 kmem_cache_free(extent_map_cache, em);
118 EXPORT_SYMBOL(free_extent_map);
121 struct extent_state *alloc_extent_state(gfp_t mask)
123 struct extent_state *state;
126 state = kmem_cache_alloc(extent_state_cache, mask);
127 if (!state || IS_ERR(state))
133 spin_lock_irqsave(&state_lock, flags);
134 list_add(&state->list, &states);
135 spin_unlock_irqrestore(&state_lock, flags);
137 atomic_set(&state->refs, 1);
138 init_waitqueue_head(&state->wq);
141 EXPORT_SYMBOL(alloc_extent_state);
143 void free_extent_state(struct extent_state *state)
148 if (atomic_dec_and_test(&state->refs)) {
149 WARN_ON(state->in_tree);
150 spin_lock_irqsave(&state_lock, flags);
151 list_del(&state->list);
152 spin_unlock_irqrestore(&state_lock, flags);
153 kmem_cache_free(extent_state_cache, state);
156 EXPORT_SYMBOL(free_extent_state);
158 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
159 struct rb_node *node)
161 struct rb_node ** p = &root->rb_node;
162 struct rb_node * parent = NULL;
163 struct tree_entry *entry;
167 entry = rb_entry(parent, struct tree_entry, rb_node);
169 if (offset < entry->start)
171 else if (offset > entry->end)
177 entry = rb_entry(node, struct tree_entry, rb_node);
179 rb_link_node(node, parent, p);
180 rb_insert_color(node, root);
184 static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
185 struct rb_node **prev_ret)
187 struct rb_node * n = root->rb_node;
188 struct rb_node *prev = NULL;
189 struct tree_entry *entry;
190 struct tree_entry *prev_entry = NULL;
193 entry = rb_entry(n, struct tree_entry, rb_node);
197 if (offset < entry->start)
199 else if (offset > entry->end)
206 while(prev && offset > prev_entry->end) {
207 prev = rb_next(prev);
208 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
214 static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
216 struct rb_node *prev;
218 ret = __tree_search(root, offset, &prev);
224 static int tree_delete(struct rb_root *root, u64 offset)
226 struct rb_node *node;
227 struct tree_entry *entry;
229 node = __tree_search(root, offset, NULL);
232 entry = rb_entry(node, struct tree_entry, rb_node);
234 rb_erase(node, root);
239 * add_extent_mapping tries a simple backward merge with existing
240 * mappings. The extent_map struct passed in will be inserted into
241 * the tree directly (no copies made, just a reference taken).
243 int add_extent_mapping(struct extent_map_tree *tree,
244 struct extent_map *em)
247 struct extent_map *prev = NULL;
250 write_lock_irq(&tree->lock);
251 rb = tree_insert(&tree->map, em->end, &em->rb_node);
253 prev = rb_entry(rb, struct extent_map, rb_node);
254 printk("found extent map %Lu %Lu on insert of %Lu %Lu\n", prev->start, prev->end, em->start, em->end);
258 atomic_inc(&em->refs);
259 if (em->start != 0) {
260 rb = rb_prev(&em->rb_node);
262 prev = rb_entry(rb, struct extent_map, rb_node);
263 if (prev && prev->end + 1 == em->start &&
264 ((em->block_start == EXTENT_MAP_HOLE &&
265 prev->block_start == EXTENT_MAP_HOLE) ||
266 (em->block_start == EXTENT_MAP_INLINE &&
267 prev->block_start == EXTENT_MAP_INLINE) ||
268 (em->block_start == EXTENT_MAP_DELALLOC &&
269 prev->block_start == EXTENT_MAP_DELALLOC) ||
270 (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
271 em->block_start == prev->block_end + 1))) {
272 em->start = prev->start;
273 em->block_start = prev->block_start;
274 rb_erase(&prev->rb_node, &tree->map);
276 free_extent_map(prev);
280 write_unlock_irq(&tree->lock);
283 EXPORT_SYMBOL(add_extent_mapping);
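/*
 * Illustrative sketch, not part of the original file: a typical caller
 * builds an extent_map for [start, start + len - 1] and lets
 * add_extent_mapping() take its own reference on it.  The function name
 * example_add_mapping and its parameters are hypothetical; a real caller
 * would normally also fill em->bdev before the mapping is used for IO.
 */
static int example_add_mapping(struct extent_map_tree *tree, u64 start,
			       u64 len, u64 block_start)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map(GFP_NOFS);
	if (!em || IS_ERR(em))
		return -ENOMEM;
	em->start = start;
	em->end = start + len - 1;
	em->block_start = block_start;
	em->block_end = block_start + len - 1;
	ret = add_extent_mapping(tree, em);
	/* drop the allocation reference; on success the tree holds its own */
	free_extent_map(em);
	return ret;
}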
286 * lookup_extent_mapping returns the first extent_map struct in the
287 * tree that intersects the [start, end] (inclusive) range. There may
288 * be additional objects in the tree that intersect, so check the object
289 * returned carefully to make sure you don't need additional lookups.
291 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
294 struct extent_map *em;
295 struct rb_node *rb_node;
297 read_lock_irq(&tree->lock);
298 rb_node = tree_search(&tree->map, start);
303 if (IS_ERR(rb_node)) {
304 em = ERR_PTR(PTR_ERR(rb_node));
307 em = rb_entry(rb_node, struct extent_map, rb_node);
308 if (em->end < start || em->start > end) {
312 atomic_inc(&em->refs);
314 read_unlock_irq(&tree->lock);
317 EXPORT_SYMBOL(lookup_extent_mapping);
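/*
 * Illustrative sketch, not part of the original file: because
 * lookup_extent_mapping() only returns the first intersecting mapping,
 * walking an entire range means repeating the lookup past each result.
 * example_count_mappings is a hypothetical name.
 */
static int example_count_mappings(struct extent_map_tree *tree,
				  u64 start, u64 end)
{
	struct extent_map *em;
	int count = 0;

	while (start <= end) {
		em = lookup_extent_mapping(tree, start, end);
		if (!em || IS_ERR(em))
			break;
		count++;
		start = em->end + 1;	/* advance past this mapping */
		free_extent_map(em);	/* drop the lookup reference */
	}
	return count;
}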
320 * removes an extent_map struct from the tree. No reference counts are
321 * dropped, and no checks are done to see if the range is in use
323 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
327 write_lock_irq(&tree->lock);
328 ret = tree_delete(&tree->map, em->end);
329 write_unlock_irq(&tree->lock);
332 EXPORT_SYMBOL(remove_extent_mapping);
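/*
 * Illustrative sketch, not part of the original file: since
 * remove_extent_mapping() drops no references, a caller that found the
 * mapping via lookup_extent_mapping() frees it twice, once for the rb
 * tree and once for the lookup (the same pattern used by
 * try_release_extent_mapping() below).  example_drop_mapping is a
 * hypothetical name.
 */
static void example_drop_mapping(struct extent_map_tree *tree, u64 start)
{
	struct extent_map *em;

	em = lookup_extent_mapping(tree, start, start);
	if (!em || IS_ERR(em))
		return;
	remove_extent_mapping(tree, em);
	free_extent_map(em);	/* once for the rb tree */
	free_extent_map(em);	/* once for the lookup above */
}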
335 * utility function to look for merge candidates inside a given range.
336 * Any extents with matching state are merged together into a single
337 * extent in the tree. Extents with EXTENT_IO in their state field
338 * are not merged because the end_io handlers need to be able to do
339 * operations on them without sleeping (or doing allocations/splits).
341 * This should be called with the tree lock held.
343 static int merge_state(struct extent_map_tree *tree,
344 struct extent_state *state)
346 struct extent_state *other;
347 struct rb_node *other_node;
349 if (state->state & EXTENT_IOBITS)
352 other_node = rb_prev(&state->rb_node);
354 other = rb_entry(other_node, struct extent_state, rb_node);
355 if (other->end == state->start - 1 &&
356 other->state == state->state) {
357 state->start = other->start;
359 rb_erase(&other->rb_node, &tree->state);
360 free_extent_state(other);
363 other_node = rb_next(&state->rb_node);
365 other = rb_entry(other_node, struct extent_state, rb_node);
366 if (other->start == state->end + 1 &&
367 other->state == state->state) {
368 other->start = state->start;
370 rb_erase(&state->rb_node, &tree->state);
371 free_extent_state(state);
378 * insert an extent_state struct into the tree. 'bits' are set on the
379 * struct before it is inserted.
381 * This may return -EEXIST if the extent is already there, in which case the
382 * state struct is freed.
384 * The tree lock is not taken internally. This is a utility function and
385 * probably isn't what you want to call (see set/clear_extent_bit).
387 static int insert_state(struct extent_map_tree *tree,
388 struct extent_state *state, u64 start, u64 end,
391 struct rb_node *node;
394 printk("end < start %Lu %Lu\n", end, start);
397 state->state |= bits;
398 state->start = start;
400 node = tree_insert(&tree->state, end, &state->rb_node);
402 struct extent_state *found;
403 found = rb_entry(node, struct extent_state, rb_node);
404 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
405 free_extent_state(state);
408 merge_state(tree, state);
413 * split a given extent state struct in two, inserting the preallocated
414 * struct 'prealloc' as the newly created second half. 'split' indicates an
415 * offset inside 'orig' where it should be split.
418 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
419 * are two extent state structs in the tree:
420 * prealloc: [orig->start, split - 1]
421 * orig: [ split, orig->end ]
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
426 static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
427 struct extent_state *prealloc, u64 split)
429 struct rb_node *node;
430 prealloc->start = orig->start;
431 prealloc->end = split - 1;
432 prealloc->state = orig->state;
435 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
437 struct extent_state *found;
438 found = rb_entry(node, struct extent_state, rb_node);
439 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
440 free_extent_state(prealloc);
447 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
449 * forcibly remove the state from the tree (delete == 1).
451 * If no bits are set on the state struct after clearing things, the
452 * struct is freed and removed from the tree
454 static int clear_state_bit(struct extent_map_tree *tree,
455 struct extent_state *state, int bits, int wake,
458 int ret = state->state & bits;
459 state->state &= ~bits;
462 if (delete || state->state == 0) {
463 if (state->in_tree) {
464 rb_erase(&state->rb_node, &tree->state);
466 free_extent_state(state);
471 merge_state(tree, state);
477 * clear some bits on a range in the tree. This may require splitting
478 * or inserting elements in the tree, so the gfp mask is used to
479 * indicate which allocations or sleeping are allowed.
481 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
482 * the given range from the tree regardless of state (ie for truncate).
484 * the range [start, end] is inclusive.
486 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
487 * bits were already set, or zero if none of the bits were already set.
489 int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
490 int bits, int wake, int delete, gfp_t mask)
492 struct extent_state *state;
493 struct extent_state *prealloc = NULL;
494 struct rb_node *node;
500 if (!prealloc && (mask & __GFP_WAIT)) {
501 prealloc = alloc_extent_state(mask);
506 write_lock_irqsave(&tree->lock, flags);
508 * this search will find the extents that end after
511 node = tree_search(&tree->state, start);
514 state = rb_entry(node, struct extent_state, rb_node);
515 if (state->start > end)
517 WARN_ON(state->end < start);
520 * | ---- desired range ---- |
522 * | ------------- state -------------- |
524 * We need to split the extent we found, and may flip
525 * bits on second half.
527 * If the extent we found extends past our range, we
528 * just split and search again. It'll get split again
529 * the next time though.
531 * If the extent we found is inside our range, we clear
532 * the desired bit on it.
535 if (state->start < start) {
536 err = split_state(tree, state, prealloc, start);
537 BUG_ON(err == -EEXIST);
541 if (state->end <= end) {
542 start = state->end + 1;
543 set |= clear_state_bit(tree, state, bits,
546 start = state->start;
551 * | ---- desired range ---- |
553 * We need to split the extent, and clear the bit
556 if (state->start <= end && state->end > end) {
557 err = split_state(tree, state, prealloc, end + 1);
558 BUG_ON(err == -EEXIST);
562 set |= clear_state_bit(tree, prealloc, bits,
568 start = state->end + 1;
569 set |= clear_state_bit(tree, state, bits, wake, delete);
573 write_unlock_irqrestore(&tree->lock, flags);
575 free_extent_state(prealloc);
582 write_unlock_irqrestore(&tree->lock, flags);
583 if (mask & __GFP_WAIT)
587 EXPORT_SYMBOL(clear_extent_bit);
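/*
 * Illustrative sketch, not part of the original file: a truncate-style
 * caller clears every record over a range, passing wake == 1 to kick any
 * sleepers and delete == 1 to drop the states no matter which bits they
 * carry (compare extent_invalidatepage() below).  example_truncate_range
 * is a hypothetical name.
 */
static int example_truncate_range(struct extent_map_tree *tree,
				  u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
				1, 1, GFP_NOFS);
}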
589 static int wait_on_state(struct extent_map_tree *tree,
590 struct extent_state *state)
593 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
594 read_unlock_irq(&tree->lock);
596 read_lock_irq(&tree->lock);
597 finish_wait(&state->wq, &wait);
602 * waits for one or more bits to clear on a range in the state tree.
603 * The range [start, end] is inclusive.
604 * The tree lock is taken by this function
606 int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
608 struct extent_state *state;
609 struct rb_node *node;
611 read_lock_irq(&tree->lock);
615 * this search will find all the extents that end after
618 node = tree_search(&tree->state, start);
622 state = rb_entry(node, struct extent_state, rb_node);
624 if (state->start > end)
627 if (state->state & bits) {
628 start = state->start;
629 atomic_inc(&state->refs);
630 wait_on_state(tree, state);
631 free_extent_state(state);
634 start = state->end + 1;
639 if (need_resched()) {
640 read_unlock_irq(&tree->lock);
642 read_lock_irq(&tree->lock);
646 read_unlock_irq(&tree->lock);
649 EXPORT_SYMBOL(wait_extent_bit);
652 * set some bits on a range in the tree. This may require allocations
653 * or sleeping, so the gfp mask is used to indicate what is allowed.
655 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
656 * range already has the desired bits set. The start of the existing
657 * range is returned in failed_start in this case.
659 * [start, end] is inclusive
660 * This takes the tree lock.
662 int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
663 int exclusive, u64 *failed_start, gfp_t mask)
665 struct extent_state *state;
666 struct extent_state *prealloc = NULL;
667 struct rb_node *node;
674 if (!prealloc && (mask & __GFP_WAIT)) {
675 prealloc = alloc_extent_state(mask);
680 write_lock_irqsave(&tree->lock, flags);
682 * this search will find all the extents that end after
685 node = tree_search(&tree->state, start);
687 err = insert_state(tree, prealloc, start, end, bits);
689 BUG_ON(err == -EEXIST);
693 state = rb_entry(node, struct extent_state, rb_node);
694 last_start = state->start;
695 last_end = state->end;
698 * | ---- desired range ---- |
701 * Just lock what we found and keep going
703 if (state->start == start && state->end <= end) {
704 set = state->state & bits;
705 if (set && exclusive) {
706 *failed_start = state->start;
710 state->state |= bits;
711 start = state->end + 1;
712 merge_state(tree, state);
717 * | ---- desired range ---- |
720 * | ------------- state -------------- |
722 * We need to split the extent we found, and may flip bits on
725 * If the extent we found extends past our
726 * range, we just split and search again. It'll get split
727 * again the next time though.
729 * If the extent we found is inside our range, we set the
732 if (state->start < start) {
733 set = state->state & bits;
734 if (exclusive && set) {
735 *failed_start = start;
739 err = split_state(tree, state, prealloc, start);
740 BUG_ON(err == -EEXIST);
744 if (state->end <= end) {
745 state->state |= bits;
746 start = state->end + 1;
747 merge_state(tree, state);
749 start = state->start;
754 * | ---- desired range ---- |
755 * | state | or | state |
757 * There's a hole, we need to insert something in it and
758 * ignore the extent we found.
760 if (state->start > start) {
762 if (end < last_start)
this_end = last_start - 1;
766 err = insert_state(tree, prealloc, start, this_end,
769 BUG_ON(err == -EEXIST);
772 start = this_end + 1;
776 * | ---- desired range ---- |
778 * We need to split the extent, and set the bit
781 if (state->start <= end && state->end > end) {
782 set = state->state & bits;
783 if (exclusive && set) {
784 *failed_start = start;
788 err = split_state(tree, state, prealloc, end + 1);
789 BUG_ON(err == -EEXIST);
791 prealloc->state |= bits;
792 merge_state(tree, prealloc);
800 write_unlock_irqrestore(&tree->lock, flags);
802 free_extent_state(prealloc);
809 write_unlock_irqrestore(&tree->lock, flags);
810 if (mask & __GFP_WAIT)
814 EXPORT_SYMBOL(set_extent_bit);
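/*
 * Illustrative sketch, not part of the original file: with exclusive == 1,
 * set_extent_bit() refuses to lock a range that is already partly locked
 * and reports where the conflict starts, which allows a non-blocking
 * "try lock".  example_try_lock_extent is a hypothetical name;
 * lock_extent() below is the sleeping variant.
 */
static int example_try_lock_extent(struct extent_map_tree *tree,
				   u64 start, u64 end)
{
	u64 failed_start;
	int err;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
			     &failed_start, GFP_NOFS);
	if (err == -EEXIST) {
		/* the part before the conflict may already be locked */
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, GFP_NOFS);
		return 0;
	}
	return 1;
}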
816 /* wrappers around set/clear extent bit */
817 int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
820 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
823 EXPORT_SYMBOL(set_extent_dirty);
825 int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
826 int bits, gfp_t mask)
828 return set_extent_bit(tree, start, end, bits, 0, NULL,
831 EXPORT_SYMBOL(set_extent_bits);
833 int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
834 int bits, gfp_t mask)
836 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
838 EXPORT_SYMBOL(clear_extent_bits);
840 int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
843 return set_extent_bit(tree, start, end,
844 EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
847 EXPORT_SYMBOL(set_extent_delalloc);
849 int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
852 return clear_extent_bit(tree, start, end,
853 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
855 EXPORT_SYMBOL(clear_extent_dirty);
857 int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
860 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
863 EXPORT_SYMBOL(set_extent_new);
865 int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
868 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
870 EXPORT_SYMBOL(clear_extent_new);
872 int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
875 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
878 EXPORT_SYMBOL(set_extent_uptodate);
880 int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
883 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
885 EXPORT_SYMBOL(clear_extent_uptodate);
887 int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
890 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
893 EXPORT_SYMBOL(set_extent_writeback);
895 int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
898 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
900 EXPORT_SYMBOL(clear_extent_writeback);
902 int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
904 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
906 EXPORT_SYMBOL(wait_on_extent_writeback);
909 * locks a range in ascending order, waiting for any locked regions
910 * it hits on the way. [start,end] are inclusive, and this will sleep.
912 int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
917 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
918 &failed_start, mask);
919 if (err == -EEXIST && (mask & __GFP_WAIT)) {
920 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
921 start = failed_start;
925 WARN_ON(start > end);
929 EXPORT_SYMBOL(lock_extent);
931 int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
934 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
936 EXPORT_SYMBOL(unlock_extent);
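/*
 * Illustrative sketch, not part of the original file: the usual pattern is
 * to lock the byte range covering a page, do the work, then unlock it
 * (extent_read_full_page() below follows the same shape).
 * example_with_page_locked is a hypothetical name.
 */
static void example_with_page_locked(struct extent_map_tree *tree,
				     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	lock_extent(tree, start, end, GFP_NOFS);
	/* ... read or modify the extent state for [start, end] ... */
	unlock_extent(tree, start, end, GFP_NOFS);
}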
939 * helper function to set pages and extents in the tree dirty
941 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
943 unsigned long index = start >> PAGE_CACHE_SHIFT;
944 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
947 while (index <= end_index) {
948 page = find_get_page(tree->mapping, index);
950 __set_page_dirty_nobuffers(page);
951 page_cache_release(page);
954 set_extent_dirty(tree, start, end, GFP_NOFS);
957 EXPORT_SYMBOL(set_range_dirty);
960 * helper function to set both pages and extents in the tree writeback
962 int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
964 unsigned long index = start >> PAGE_CACHE_SHIFT;
965 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
968 while (index <= end_index) {
969 page = find_get_page(tree->mapping, index);
971 set_page_writeback(page);
972 page_cache_release(page);
975 set_extent_writeback(tree, start, end, GFP_NOFS);
978 EXPORT_SYMBOL(set_range_writeback);
980 int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
981 u64 *start_ret, u64 *end_ret, int bits)
983 struct rb_node *node;
984 struct extent_state *state;
987 read_lock_irq(&tree->lock);
989 * this search will find all the extents that end after
992 node = tree_search(&tree->state, start);
993 if (!node || IS_ERR(node)) {
998 state = rb_entry(node, struct extent_state, rb_node);
999 if (state->end >= start && (state->state & bits)) {
1000 *start_ret = state->start;
1001 *end_ret = state->end;
1005 node = rb_next(node);
1010 read_unlock_irq(&tree->lock);
1013 EXPORT_SYMBOL(find_first_extent_bit);
1015 u64 find_lock_delalloc_range(struct extent_map_tree *tree,
1016 u64 start, u64 lock_start, u64 *end, u64 max_bytes)
1018 struct rb_node *node;
1019 struct extent_state *state;
1020 u64 cur_start = start;
1022 u64 total_bytes = 0;
1024 write_lock_irq(&tree->lock);
1026 * this search will find all the extents that end after
1030 node = tree_search(&tree->state, cur_start);
1031 if (!node || IS_ERR(node)) {
1036 state = rb_entry(node, struct extent_state, rb_node);
1037 if (state->start != cur_start) {
1040 if (!(state->state & EXTENT_DELALLOC)) {
1043 if (state->start >= lock_start) {
1044 if (state->state & EXTENT_LOCKED) {
1046 atomic_inc(&state->refs);
1047 write_unlock_irq(&tree->lock);
1049 write_lock_irq(&tree->lock);
1050 finish_wait(&state->wq, &wait);
1051 free_extent_state(state);
1054 state->state |= EXTENT_LOCKED;
1058 cur_start = state->end + 1;
1059 node = rb_next(node);
1062 total_bytes = state->end - state->start + 1;
1063 if (total_bytes >= max_bytes)
1067 write_unlock_irq(&tree->lock);
1072 * helper function to lock both pages and extents in the tree.
1073 * pages must be locked first.
1075 int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
1077 unsigned long index = start >> PAGE_CACHE_SHIFT;
1078 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1082 while (index <= end_index) {
1083 page = grab_cache_page(tree->mapping, index);
1089 err = PTR_ERR(page);
1094 lock_extent(tree, start, end, GFP_NOFS);
1099 * we failed above in getting the page at 'index', so we undo here
1100 * up to but not including the page at 'index'
1103 index = start >> PAGE_CACHE_SHIFT;
1104 while (index < end_index) {
1105 page = find_get_page(tree->mapping, index);
1107 page_cache_release(page);
1112 EXPORT_SYMBOL(lock_range);
1115 * helper function to unlock both pages and extents in the tree.
1117 int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
1119 unsigned long index = start >> PAGE_CACHE_SHIFT;
1120 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1123 while (index <= end_index) {
1124 page = find_get_page(tree->mapping, index);
1126 page_cache_release(page);
1129 unlock_extent(tree, start, end, GFP_NOFS);
1132 EXPORT_SYMBOL(unlock_range);
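/*
 * Illustrative sketch, not part of the original file: lock_range() grabs
 * the page locks before the extent lock and can fail part way through, so
 * callers must check its return value before touching the range.
 * example_locked_op is a hypothetical name.
 */
static int example_locked_op(struct extent_map_tree *tree, u64 start, u64 end)
{
	int err;

	err = lock_range(tree, start, end);
	if (err)
		return err;
	/* ... operate on the locked pages and extent records ... */
	return unlock_range(tree, start, end);
}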
1134 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
1136 struct rb_node *node;
1137 struct extent_state *state;
1140 write_lock_irq(&tree->lock);
1142 * this search will find all the extents that end after
1145 node = tree_search(&tree->state, start);
1146 if (!node || IS_ERR(node)) {
1150 state = rb_entry(node, struct extent_state, rb_node);
1151 if (state->start != start) {
1155 state->private = private;
1157 write_unlock_irq(&tree->lock);
1161 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
1163 struct rb_node *node;
1164 struct extent_state *state;
1167 read_lock_irq(&tree->lock);
1169 * this search will find all the extents that end after
1172 node = tree_search(&tree->state, start);
1173 if (!node || IS_ERR(node)) {
1177 state = rb_entry(node, struct extent_state, rb_node);
1178 if (state->start != start) {
1182 *private = state->private;
1184 read_unlock_irq(&tree->lock);
1189 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
1191 * has the bits set. Otherwise, 1 is returned if any bit in the
1192 * range is found set.
1194 int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
1195 int bits, int filled)
1197 struct extent_state *state = NULL;
1198 struct rb_node *node;
1201 read_lock_irq(&tree->lock);
1202 node = tree_search(&tree->state, start);
1203 while (node && start <= end) {
1204 state = rb_entry(node, struct extent_state, rb_node);
1205 if (state->start > end)
1208 if (filled && state->start > start) {
1212 if (state->state & bits) {
1216 } else if (filled) {
1220 start = state->end + 1;
1223 node = rb_next(node);
1225 read_unlock_irq(&tree->lock);
1228 EXPORT_SYMBOL(test_range_bit);
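/*
 * Illustrative sketch, not part of the original file: filled == 1 asks
 * whether every byte of the range carries the bits, filled == 0 whether
 * any byte does (the page helpers that follow use both forms).
 * example_range_state is a hypothetical name.
 */
static void example_range_state(struct extent_map_tree *tree, u64 start,
				u64 end, int *fully_uptodate, int *any_locked)
{
	*fully_uptodate = test_range_bit(tree, start, end,
					 EXTENT_UPTODATE, 1);
	*any_locked = test_range_bit(tree, start, end, EXTENT_LOCKED, 0);
}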
1231 * helper function to set a given page up to date if all the
1232 * extents in the tree for that page are up to date
1234 static int check_page_uptodate(struct extent_map_tree *tree,
1237 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1238 u64 end = start + PAGE_CACHE_SIZE - 1;
1239 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1240 SetPageUptodate(page);
1245 * helper function to unlock a page if all the extents in the tree
1246 * for that page are unlocked
1248 static int check_page_locked(struct extent_map_tree *tree,
1251 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1252 u64 end = start + PAGE_CACHE_SIZE - 1;
1253 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1259 * helper function to end page writeback if all the extents
1260 * in the tree for that page are done with writeback
1262 static int check_page_writeback(struct extent_map_tree *tree,
1265 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1266 u64 end = start + PAGE_CACHE_SIZE - 1;
1267 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1268 end_page_writeback(page);
1272 /* lots and lots of room for performance fixes in the end_bio funcs */
1275 * after a writepage IO is done, we need to:
1276 * clear the uptodate bits on error
1277 * clear the writeback bits in the extent tree for this IO
1278 * end_page_writeback if the page has no more pending IO
1280 * Scheduling is not allowed, so the extent state tree is expected
1281 * to have one and only one object corresponding to this IO.
1283 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1284 static void end_bio_extent_writepage(struct bio *bio, int err)
1286 static int end_bio_extent_writepage(struct bio *bio,
1287 unsigned int bytes_done, int err)
1290 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1291 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1292 struct extent_map_tree *tree = bio->bi_private;
1297 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1303 struct page *page = bvec->bv_page;
1304 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1306 end = start + bvec->bv_len - 1;
1308 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1313 if (--bvec >= bio->bi_io_vec)
1314 prefetchw(&bvec->bv_page->flags);
1317 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1318 ClearPageUptodate(page);
1321 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1324 end_page_writeback(page);
1326 check_page_writeback(tree, page);
1327 if (tree->ops && tree->ops->writepage_end_io_hook)
1328 tree->ops->writepage_end_io_hook(page, start, end);
1329 } while (bvec >= bio->bi_io_vec);
1332 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1338 * after a readpage IO is done, we need to:
1339 * clear the uptodate bits on error
1340 * set the uptodate bits if things worked
1341 * set the page up to date if all extents in the tree are uptodate
1342 * clear the lock bit in the extent tree
1343 * unlock the page if there are no other extents locked for it
1345 * Scheduling is not allowed, so the extent state tree is expected
1346 * to have one and only one object corresponding to this IO.
1348 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1349 static void end_bio_extent_readpage(struct bio *bio, int err)
1351 static int end_bio_extent_readpage(struct bio *bio,
1352 unsigned int bytes_done, int err)
1355 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1356 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1357 struct extent_map_tree *tree = bio->bi_private;
1363 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1369 struct page *page = bvec->bv_page;
1370 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1372 end = start + bvec->bv_len - 1;
1374 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1379 if (--bvec >= bio->bi_io_vec)
1380 prefetchw(&bvec->bv_page->flags);
1382 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1383 ret = tree->ops->readpage_end_io_hook(page, start, end);
1388 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1390 SetPageUptodate(page);
1392 check_page_uptodate(tree, page);
1394 ClearPageUptodate(page);
1398 unlock_extent(tree, start, end, GFP_ATOMIC);
1403 check_page_locked(tree, page);
1404 } while (bvec >= bio->bi_io_vec);
1407 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
 * IO done from prepare_write is pretty simple: we just unlock
1414 * the structs in the extent tree when done, and set the uptodate bits
1417 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1418 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1420 static int end_bio_extent_preparewrite(struct bio *bio,
1421 unsigned int bytes_done, int err)
1424 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1425 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1426 struct extent_map_tree *tree = bio->bi_private;
1430 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1436 struct page *page = bvec->bv_page;
1437 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1439 end = start + bvec->bv_len - 1;
1441 if (--bvec >= bio->bi_io_vec)
1442 prefetchw(&bvec->bv_page->flags);
1445 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1447 ClearPageUptodate(page);
1451 unlock_extent(tree, start, end, GFP_ATOMIC);
1453 } while (bvec >= bio->bi_io_vec);
1456 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1461 static int submit_extent_page(int rw, struct extent_map_tree *tree,
1462 struct page *page, sector_t sector,
1463 size_t size, unsigned long offset,
1464 struct block_device *bdev,
1465 bio_end_io_t end_io_func)
1470 bio = bio_alloc(GFP_NOIO, 1);
1472 bio->bi_sector = sector;
1473 bio->bi_bdev = bdev;
1474 bio->bi_io_vec[0].bv_page = page;
1475 bio->bi_io_vec[0].bv_len = size;
1476 bio->bi_io_vec[0].bv_offset = offset;
1480 bio->bi_size = size;
1482 bio->bi_end_io = end_io_func;
1483 bio->bi_private = tree;
1486 submit_bio(rw, bio);
1488 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1495 void set_page_extent_mapped(struct page *page)
1497 if (!PagePrivate(page)) {
1498 SetPagePrivate(page);
1499 WARN_ON(!page->mapping->a_ops->invalidatepage);
1500 set_page_private(page, EXTENT_PAGE_PRIVATE);
1501 page_cache_get(page);
1506 * basic readpage implementation. Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handler).
1510 int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1511 get_extent_t *get_extent)
1513 struct inode *inode = page->mapping->host;
1514 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1515 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1519 u64 last_byte = i_size_read(inode);
1523 struct extent_map *em;
1524 struct block_device *bdev;
1527 size_t page_offset = 0;
1529 size_t blocksize = inode->i_sb->s_blocksize;
1531 set_page_extent_mapped(page);
1534 lock_extent(tree, start, end, GFP_NOFS);
1536 while (cur <= end) {
1537 if (cur >= last_byte) {
1538 iosize = PAGE_CACHE_SIZE - page_offset;
1539 zero_user_page(page, page_offset, iosize, KM_USER0);
1540 set_extent_uptodate(tree, cur, cur + iosize - 1,
1542 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1545 em = get_extent(inode, page, page_offset, cur, end, 0);
1546 if (IS_ERR(em) || !em) {
1548 unlock_extent(tree, cur, end, GFP_NOFS);
1552 extent_offset = cur - em->start;
1553 BUG_ON(em->end < cur);
1556 iosize = min(em->end - cur, end - cur) + 1;
1557 cur_end = min(em->end, end);
1558 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1559 sector = (em->block_start + extent_offset) >> 9;
1561 block_start = em->block_start;
1562 free_extent_map(em);
1565 /* we've found a hole, just zero and go on */
1566 if (block_start == EXTENT_MAP_HOLE) {
1567 zero_user_page(page, page_offset, iosize, KM_USER0);
1568 set_extent_uptodate(tree, cur, cur + iosize - 1,
1570 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1572 page_offset += iosize;
1575 /* the get_extent function already copied into the page */
1576 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1577 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1579 page_offset += iosize;
1584 if (tree->ops && tree->ops->readpage_io_hook) {
1585 ret = tree->ops->readpage_io_hook(page, cur,
1589 ret = submit_extent_page(READ, tree, page,
1590 sector, iosize, page_offset,
1591 bdev, end_bio_extent_readpage);
1596 page_offset += iosize;
1600 if (!PageError(page))
1601 SetPageUptodate(page);
1606 EXPORT_SYMBOL(extent_read_full_page);
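/*
 * Illustrative sketch, not part of the original file: a filesystem wires
 * this up as the body of its address_space ->readpage(), with the tree and
 * the get_extent callback coming from its own per-inode data.
 * example_read_one_page is a hypothetical name.
 */
static int example_read_one_page(struct extent_map_tree *tree,
				 struct page *page,
				 get_extent_t *get_extent)
{
	return extent_read_full_page(tree, page, get_extent);
}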
1609 * the writepage semantics are similar to regular writepage. extent
1610 * records are inserted to lock ranges in the tree, and as dirty areas
1611 * are found, they are marked writeback. Then the lock bits are removed
1612 * and the end_io handler clears the writeback ranges
1614 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
1615 get_extent_t *get_extent,
1616 struct writeback_control *wbc)
1618 struct inode *inode = page->mapping->host;
1619 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1620 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1624 u64 last_byte = i_size_read(inode);
1628 struct extent_map *em;
1629 struct block_device *bdev;
1632 size_t page_offset = 0;
1634 loff_t i_size = i_size_read(inode);
1635 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1639 WARN_ON(!PageLocked(page));
1640 if (page->index > end_index) {
1641 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1646 if (page->index == end_index) {
1647 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1648 zero_user_page(page, offset,
1649 PAGE_CACHE_SIZE - offset, KM_USER0);
1652 set_page_extent_mapped(page);
1654 lock_extent(tree, start, page_end, GFP_NOFS);
1655 nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
1659 tree->ops->fill_delalloc(inode, start, delalloc_end);
1660 if (delalloc_end >= page_end + 1) {
1661 clear_extent_bit(tree, page_end + 1, delalloc_end,
1662 EXTENT_LOCKED | EXTENT_DELALLOC,
1665 clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
1667 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1668 printk("found delalloc bits after clear extent_bit\n");
1670 } else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1671 printk("found delalloc bits after find_delalloc_range returns 0\n");
1675 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1676 printk("found delalloc bits after lock_extent\n");
1679 if (last_byte <= start) {
1680 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1684 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1685 blocksize = inode->i_sb->s_blocksize;
1687 while (cur <= end) {
1688 if (cur >= last_byte) {
1689 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1692 em = get_extent(inode, page, page_offset, cur, end, 1);
1693 if (IS_ERR(em) || !em) {
1698 extent_offset = cur - em->start;
1699 BUG_ON(em->end < cur);
1701 iosize = min(em->end - cur, end - cur) + 1;
1702 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1703 sector = (em->block_start + extent_offset) >> 9;
1705 block_start = em->block_start;
1706 free_extent_map(em);
1709 if (block_start == EXTENT_MAP_HOLE ||
1710 block_start == EXTENT_MAP_INLINE) {
1711 clear_extent_dirty(tree, cur,
1712 cur + iosize - 1, GFP_NOFS);
1714 page_offset += iosize;
1718 /* leave this out until we have a page_mkwrite call */
1719 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1722 page_offset += iosize;
1725 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1726 if (tree->ops && tree->ops->writepage_io_hook) {
1727 ret = tree->ops->writepage_io_hook(page, cur,
1735 set_range_writeback(tree, cur, cur + iosize - 1);
1736 ret = submit_extent_page(WRITE, tree, page, sector,
1737 iosize, page_offset, bdev,
1738 end_bio_extent_writepage);
1743 page_offset += iosize;
1747 unlock_extent(tree, start, page_end, GFP_NOFS);
1751 EXPORT_SYMBOL(extent_write_full_page);
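/*
 * Illustrative sketch, not part of the original file: the matching
 * ->writepage() body; the page is expected to arrive locked (note the
 * WARN_ON(!PageLocked(page)) above) and the writeback_control comes
 * straight from the VM.  example_write_one_page is a hypothetical name.
 */
static int example_write_one_page(struct extent_map_tree *tree,
				  struct page *page,
				  get_extent_t *get_extent,
				  struct writeback_control *wbc)
{
	return extent_write_full_page(tree, page, get_extent, wbc);
}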
1754 * basic invalidatepage code, this waits on any locked or writeback
1755 * ranges corresponding to the page, and then deletes any extent state
1756 * records from the tree
1758 int extent_invalidatepage(struct extent_map_tree *tree,
1759 struct page *page, unsigned long offset)
1761 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
1762 u64 end = start + PAGE_CACHE_SIZE - 1;
1763 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
start += (offset + blocksize - 1) & ~(blocksize - 1);
1769 lock_extent(tree, start, end, GFP_NOFS);
1770 wait_on_extent_writeback(tree, start, end);
1771 clear_extent_bit(tree, start, end,
1772 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
1776 EXPORT_SYMBOL(extent_invalidatepage);
 * simple commit_write call; set_range_dirty is used to mark both
1780 * the pages and the extent records as dirty
1782 int extent_commit_write(struct extent_map_tree *tree,
1783 struct inode *inode, struct page *page,
1784 unsigned from, unsigned to)
1786 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1788 set_page_extent_mapped(page);
1789 set_page_dirty(page);
1791 if (pos > inode->i_size) {
1792 i_size_write(inode, pos);
1793 mark_inode_dirty(inode);
1797 EXPORT_SYMBOL(extent_commit_write);
1799 int extent_prepare_write(struct extent_map_tree *tree,
1800 struct inode *inode, struct page *page,
1801 unsigned from, unsigned to, get_extent_t *get_extent)
1803 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1804 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1806 u64 orig_block_start;
1809 struct extent_map *em;
1810 unsigned blocksize = 1 << inode->i_blkbits;
1811 size_t page_offset = 0;
1812 size_t block_off_start;
1813 size_t block_off_end;
1819 set_page_extent_mapped(page);
1821 block_start = (page_start + from) & ~((u64)blocksize - 1);
1822 block_end = (page_start + to - 1) | (blocksize - 1);
1823 orig_block_start = block_start;
1825 lock_extent(tree, page_start, page_end, GFP_NOFS);
1826 while(block_start <= block_end) {
1827 em = get_extent(inode, page, page_offset, block_start,
1829 if (IS_ERR(em) || !em) {
1832 cur_end = min(block_end, em->end);
1833 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
1834 block_off_end = block_off_start + blocksize;
1835 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
1837 if (!PageUptodate(page) && isnew &&
1838 (block_off_end > to || block_off_start < from)) {
1841 kaddr = kmap_atomic(page, KM_USER0);
1842 if (block_off_end > to)
1843 memset(kaddr + to, 0, block_off_end - to);
1844 if (block_off_start < from)
1845 memset(kaddr + block_off_start, 0,
1846 from - block_off_start);
1847 flush_dcache_page(page);
1848 kunmap_atomic(kaddr, KM_USER0);
1850 if (!isnew && !PageUptodate(page) &&
1851 (block_off_end > to || block_off_start < from) &&
1852 !test_range_bit(tree, block_start, cur_end,
1853 EXTENT_UPTODATE, 1)) {
1855 u64 extent_offset = block_start - em->start;
1857 sector = (em->block_start + extent_offset) >> 9;
1858 iosize = (cur_end - block_start + blocksize - 1) &
1859 ~((u64)blocksize - 1);
1861 * we've already got the extent locked, but we
1862 * need to split the state such that our end_bio
1863 * handler can clear the lock.
1865 set_extent_bit(tree, block_start,
1866 block_start + iosize - 1,
1867 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
1868 ret = submit_extent_page(READ, tree, page,
1869 sector, iosize, page_offset, em->bdev,
1870 end_bio_extent_preparewrite);
1872 block_start = block_start + iosize;
1874 set_extent_uptodate(tree, block_start, cur_end,
1876 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
1877 block_start = cur_end + 1;
1879 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
1880 free_extent_map(em);
1883 wait_extent_bit(tree, orig_block_start,
1884 block_end, EXTENT_LOCKED);
1886 check_page_uptodate(tree, page);
1888 /* FIXME, zero out newly allocated blocks on error */
1891 EXPORT_SYMBOL(extent_prepare_write);
1894 * a helper for releasepage. As long as there are no locked extents
1895 * in the range corresponding to the page, both state records and extent
1896 * map records are removed
1898 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
1900 struct extent_map *em;
1901 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1902 u64 end = start + PAGE_CACHE_SIZE - 1;
1903 u64 orig_start = start;
1906 while (start <= end) {
1907 em = lookup_extent_mapping(tree, start, end);
1908 if (!em || IS_ERR(em))
1910 if (!test_range_bit(tree, em->start, em->end,
1911 EXTENT_LOCKED, 0)) {
1912 remove_extent_mapping(tree, em);
1913 /* once for the rb tree */
1914 free_extent_map(em);
1916 start = em->end + 1;
1918 free_extent_map(em);
1920 if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
1923 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
1927 EXPORT_SYMBOL(try_release_extent_mapping);
1929 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
1930 get_extent_t *get_extent)
1932 struct inode *inode = mapping->host;
1933 u64 start = iblock << inode->i_blkbits;
1934 u64 end = start + (1 << inode->i_blkbits) - 1;
1935 sector_t sector = 0;
1936 struct extent_map *em;
1938 em = get_extent(inode, NULL, 0, start, end, 0);
1939 if (!em || IS_ERR(em))
1942 if (em->block_start == EXTENT_MAP_INLINE ||
1943 em->block_start == EXTENT_MAP_HOLE)
1946 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
1948 free_extent_map(em);
1952 static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
1954 if (list_empty(&eb->lru)) {
1955 extent_buffer_get(eb);
1956 list_add(&eb->lru, &tree->buffer_lru);
1958 if (tree->lru_size >= BUFFER_LRU_MAX) {
1959 struct extent_buffer *rm;
1960 rm = list_entry(tree->buffer_lru.prev,
1961 struct extent_buffer, lru);
1964 free_extent_buffer(rm);
1967 list_move(&eb->lru, &tree->buffer_lru);
1970 static struct extent_buffer *find_lru(struct extent_map_tree *tree,
1971 u64 start, unsigned long len)
1973 struct list_head *lru = &tree->buffer_lru;
1974 struct list_head *cur = lru->next;
1975 struct extent_buffer *eb;
1977 if (list_empty(lru))
1981 eb = list_entry(cur, struct extent_buffer, lru);
1982 if (eb->start == start && eb->len == len) {
1983 extent_buffer_get(eb);
1987 } while (cur != lru);
1991 static inline unsigned long num_extent_pages(u64 start, u64 len)
1993 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
1994 (start >> PAGE_CACHE_SHIFT);
1997 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2001 struct address_space *mapping;
2004 return eb->first_page;
2005 i += eb->start >> PAGE_CACHE_SHIFT;
2006 mapping = eb->first_page->mapping;
2007 read_lock_irq(&mapping->tree_lock);
2008 p = radix_tree_lookup(&mapping->page_tree, i);
2009 read_unlock_irq(&mapping->tree_lock);
2013 static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2018 struct extent_buffer *eb = NULL;
2020 spin_lock(&tree->lru_lock);
2021 eb = find_lru(tree, start, len);
2025 spin_unlock(&tree->lru_lock);
2028 memset(eb, 0, sizeof(*eb));
2030 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2032 INIT_LIST_HEAD(&eb->lru);
2035 atomic_set(&eb->refs, 1);
2037 spin_lock(&tree->lru_lock);
2040 spin_unlock(&tree->lru_lock);
2044 static void __free_extent_buffer(struct extent_buffer *eb)
2046 kmem_cache_free(extent_buffer_cache, eb);
2049 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2050 u64 start, unsigned long len,
2054 unsigned long num_pages = num_extent_pages(start, len);
2056 unsigned long index = start >> PAGE_CACHE_SHIFT;
2057 struct extent_buffer *eb;
2059 struct address_space *mapping = tree->mapping;
2062 eb = __alloc_extent_buffer(tree, start, len, mask);
2063 if (!eb || IS_ERR(eb))
2066 if (eb->flags & EXTENT_BUFFER_FILLED)
2070 eb->first_page = page0;
2073 page_cache_get(page0);
2074 mark_page_accessed(page0);
2075 set_page_extent_mapped(page0);
2076 set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2081 for (; i < num_pages; i++, index++) {
2082 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2085 /* make sure the free only frees the pages we've
2086 * grabbed a reference on
2088 eb->len = i << PAGE_CACHE_SHIFT;
2089 eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
2092 set_page_extent_mapped(p);
2093 mark_page_accessed(p);
2096 set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2099 set_page_private(p, EXTENT_PAGE_PRIVATE);
2101 if (!PageUptodate(p))
2106 eb->flags |= EXTENT_UPTODATE;
2107 eb->flags |= EXTENT_BUFFER_FILLED;
2110 free_extent_buffer(eb);
2113 EXPORT_SYMBOL(alloc_extent_buffer);
2115 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2116 u64 start, unsigned long len,
2119 unsigned long num_pages = num_extent_pages(start, len);
unsigned long i;
unsigned long index = start >> PAGE_CACHE_SHIFT;
2121 struct extent_buffer *eb;
2123 struct address_space *mapping = tree->mapping;
2126 eb = __alloc_extent_buffer(tree, start, len, mask);
2127 if (!eb || IS_ERR(eb))
2130 if (eb->flags & EXTENT_BUFFER_FILLED)
2133 for (i = 0; i < num_pages; i++, index++) {
2134 p = find_lock_page(mapping, index);
2136 /* make sure the free only frees the pages we've
2137 * grabbed a reference on
2139 eb->len = i << PAGE_CACHE_SHIFT;
2140 eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
2143 set_page_extent_mapped(p);
2144 mark_page_accessed(p);
2148 set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2151 set_page_private(p, EXTENT_PAGE_PRIVATE);
2154 if (!PageUptodate(p))
2159 eb->flags |= EXTENT_UPTODATE;
2160 eb->flags |= EXTENT_BUFFER_FILLED;
2163 free_extent_buffer(eb);
2166 EXPORT_SYMBOL(find_extent_buffer);
2168 void free_extent_buffer(struct extent_buffer *eb)
2171 unsigned long num_pages;
2176 if (!atomic_dec_and_test(&eb->refs))
2179 num_pages = num_extent_pages(eb->start, eb->len);
2181 for (i = 0; i < num_pages; i++) {
2182 page_cache_release(extent_buffer_page(eb, i));
2184 __free_extent_buffer(eb);
2186 EXPORT_SYMBOL(free_extent_buffer);
2188 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2189 struct extent_buffer *eb)
2193 unsigned long num_pages;
2196 u64 start = eb->start;
2197 u64 end = start + eb->len - 1;
2199 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2200 num_pages = num_extent_pages(eb->start, eb->len);
2202 for (i = 0; i < num_pages; i++) {
2203 page = extent_buffer_page(eb, i);
2206 * if we're on the last page or the first page and the
2207 * block isn't aligned on a page boundary, do extra checks
 * to make sure we don't clean a page that is partially dirty
2210 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2211 ((i == num_pages - 1) &&
2212 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2213 start = (u64)page->index << PAGE_CACHE_SHIFT;
2214 end = start + PAGE_CACHE_SIZE - 1;
2215 if (test_range_bit(tree, start, end,
2221 clear_page_dirty_for_io(page);
2226 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2228 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2229 struct extent_buffer *eb)
2231 return wait_on_extent_writeback(tree, eb->start,
2232 eb->start + eb->len - 1);
2234 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2236 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2237 struct extent_buffer *eb)
2240 unsigned long num_pages;
2242 num_pages = num_extent_pages(eb->start, eb->len);
2243 for (i = 0; i < num_pages; i++) {
2244 struct page *page = extent_buffer_page(eb, i);
2245 /* writepage may need to do something special for the
 * first page, so we have to make sure page->private is
2247 * properly set. releasepage may drop page->private
2248 * on us if the page isn't already dirty.
2252 set_page_private(page,
2253 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2256 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2260 return set_extent_dirty(tree, eb->start,
2261 eb->start + eb->len - 1, GFP_NOFS);
2263 EXPORT_SYMBOL(set_extent_buffer_dirty);
2265 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2266 struct extent_buffer *eb)
2270 unsigned long num_pages;
2272 num_pages = num_extent_pages(eb->start, eb->len);
2274 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2276 for (i = 0; i < num_pages; i++) {
2277 page = extent_buffer_page(eb, i);
2278 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2279 ((i == num_pages - 1) &&
2280 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2281 check_page_uptodate(tree, page);
2284 SetPageUptodate(page);
2288 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2290 int extent_buffer_uptodate(struct extent_map_tree *tree,
2291 struct extent_buffer *eb)
2293 if (eb->flags & EXTENT_UPTODATE)
2295 return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2296 EXTENT_UPTODATE, 1);
2298 EXPORT_SYMBOL(extent_buffer_uptodate);
2300 int read_extent_buffer_pages(struct extent_map_tree *tree,
2301 struct extent_buffer *eb,
2306 unsigned long start_i;
2310 unsigned long num_pages;
2312 if (eb->flags & EXTENT_UPTODATE)
2315 if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2316 EXTENT_UPTODATE, 1)) {
2320 WARN_ON(start < eb->start);
2321 start_i = (start >> PAGE_CACHE_SHIFT) -
2322 (eb->start >> PAGE_CACHE_SHIFT);
2327 num_pages = num_extent_pages(eb->start, eb->len);
2328 for (i = start_i; i < num_pages; i++) {
2329 page = extent_buffer_page(eb, i);
2330 if (PageUptodate(page)) {
2334 if (TestSetPageLocked(page)) {
2340 if (!PageUptodate(page)) {
2341 err = page->mapping->a_ops->readpage(NULL, page);
2354 for (i = start_i; i < num_pages; i++) {
2355 page = extent_buffer_page(eb, i);
2356 wait_on_page_locked(page);
2357 if (!PageUptodate(page)) {
2362 eb->flags |= EXTENT_UPTODATE;
2365 EXPORT_SYMBOL(read_extent_buffer_pages);
2367 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2368 unsigned long start,
2375 char *dst = (char *)dstv;
2376 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2377 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2378 unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2380 WARN_ON(start > eb->len);
2381 WARN_ON(start + len > eb->start + eb->len);
2383 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2386 page = extent_buffer_page(eb, i);
2387 if (!PageUptodate(page)) {
2388 printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2391 WARN_ON(!PageUptodate(page));
2393 cur = min(len, (PAGE_CACHE_SIZE - offset));
2394 kaddr = kmap_atomic(page, KM_USER1);
2395 memcpy(dst, kaddr + offset, cur);
2396 kunmap_atomic(kaddr, KM_USER1);
2404 EXPORT_SYMBOL(read_extent_buffer);
2406 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2407 unsigned long min_len, char **token, char **map,
2408 unsigned long *map_start,
2409 unsigned long *map_len, int km)
2411 size_t offset = start & (PAGE_CACHE_SIZE - 1);
2414 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2415 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2416 unsigned long end_i = (start_offset + start + min_len - 1) >>
2423 offset = start_offset;
2427 *map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
2429 if (start + min_len > eb->len) {
2430 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
2434 p = extent_buffer_page(eb, i);
2435 WARN_ON(!PageUptodate(p));
2436 kaddr = kmap_atomic(p, km);
2438 *map = kaddr + offset;
2439 *map_len = PAGE_CACHE_SIZE - offset;
2442 EXPORT_SYMBOL(map_private_extent_buffer);
2444 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2445 unsigned long min_len,
2446 char **token, char **map,
2447 unsigned long *map_start,
2448 unsigned long *map_len, int km)
2452 if (eb->map_token) {
2453 unmap_extent_buffer(eb, eb->map_token, km);
2454 eb->map_token = NULL;
2457 err = map_private_extent_buffer(eb, start, min_len, token, map,
2458 map_start, map_len, km);
2460 eb->map_token = *token;
2462 eb->map_start = *map_start;
2463 eb->map_len = *map_len;
2467 EXPORT_SYMBOL(map_extent_buffer);
2469 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2471 kunmap_atomic(token, km);
2473 EXPORT_SYMBOL(unmap_extent_buffer);
2475 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2476 unsigned long start,
2483 char *ptr = (char *)ptrv;
2484 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2485 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2488 WARN_ON(start > eb->len);
2489 WARN_ON(start + len > eb->start + eb->len);
2491 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2494 page = extent_buffer_page(eb, i);
2495 WARN_ON(!PageUptodate(page));
2497 cur = min(len, (PAGE_CACHE_SIZE - offset));
2499 kaddr = kmap_atomic(page, KM_USER0);
2500 ret = memcmp(ptr, kaddr + offset, cur);
2501 kunmap_atomic(kaddr, KM_USER0);
2512 EXPORT_SYMBOL(memcmp_extent_buffer);
2514 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2515 unsigned long start, unsigned long len)
2521 char *src = (char *)srcv;
2522 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2523 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2525 WARN_ON(start > eb->len);
2526 WARN_ON(start + len > eb->start + eb->len);
2528 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2531 page = extent_buffer_page(eb, i);
2532 WARN_ON(!PageUptodate(page));
2534 cur = min(len, PAGE_CACHE_SIZE - offset);
2535 kaddr = kmap_atomic(page, KM_USER1);
2536 memcpy(kaddr + offset, src, cur);
2537 kunmap_atomic(kaddr, KM_USER1);
2545 EXPORT_SYMBOL(write_extent_buffer);
2547 void memset_extent_buffer(struct extent_buffer *eb, char c,
2548 unsigned long start, unsigned long len)
2554 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2555 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2557 WARN_ON(start > eb->len);
2558 WARN_ON(start + len > eb->start + eb->len);
2560 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2563 page = extent_buffer_page(eb, i);
2564 WARN_ON(!PageUptodate(page));
2566 cur = min(len, PAGE_CACHE_SIZE - offset);
2567 kaddr = kmap_atomic(page, KM_USER0);
2568 memset(kaddr + offset, c, cur);
2569 kunmap_atomic(kaddr, KM_USER0);
2576 EXPORT_SYMBOL(memset_extent_buffer);
2578 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
2579 unsigned long dst_offset, unsigned long src_offset,
2582 u64 dst_len = dst->len;
2587 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2588 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2590 WARN_ON(src->len != dst_len);
2592 offset = (start_offset + dst_offset) &
2593 ((unsigned long)PAGE_CACHE_SIZE - 1);
2596 page = extent_buffer_page(dst, i);
2597 WARN_ON(!PageUptodate(page));
2599 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
2601 kaddr = kmap_atomic(page, KM_USER0);
2602 read_extent_buffer(src, kaddr + offset, src_offset, cur);
2603 kunmap_atomic(kaddr, KM_USER0);
2611 EXPORT_SYMBOL(copy_extent_buffer);
2613 static void move_pages(struct page *dst_page, struct page *src_page,
2614 unsigned long dst_off, unsigned long src_off,
2617 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2618 if (dst_page == src_page) {
2619 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
2621 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
2622 char *p = dst_kaddr + dst_off + len;
2623 char *s = src_kaddr + src_off + len;
2628 kunmap_atomic(src_kaddr, KM_USER1);
2630 kunmap_atomic(dst_kaddr, KM_USER0);
2633 static void copy_pages(struct page *dst_page, struct page *src_page,
2634 unsigned long dst_off, unsigned long src_off,
2637 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2640 if (dst_page != src_page)
2641 src_kaddr = kmap_atomic(src_page, KM_USER1);
2643 src_kaddr = dst_kaddr;
2645 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
2646 kunmap_atomic(dst_kaddr, KM_USER0);
2647 if (dst_page != src_page)
2648 kunmap_atomic(src_kaddr, KM_USER1);
2651 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2652 unsigned long src_offset, unsigned long len)
2655 size_t dst_off_in_page;
2656 size_t src_off_in_page;
2657 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2658 unsigned long dst_i;
2659 unsigned long src_i;
2661 if (src_offset + len > dst->len) {
2662 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2663 src_offset, len, dst->len);
2666 if (dst_offset + len > dst->len) {
2667 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2668 dst_offset, len, dst->len);
2673 dst_off_in_page = (start_offset + dst_offset) &
2674 ((unsigned long)PAGE_CACHE_SIZE - 1);
2675 src_off_in_page = (start_offset + src_offset) &
2676 ((unsigned long)PAGE_CACHE_SIZE - 1);
2678 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2679 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
2681 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
2683 cur = min_t(unsigned long, cur,
2684 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
2686 copy_pages(extent_buffer_page(dst, dst_i),
2687 extent_buffer_page(dst, src_i),
2688 dst_off_in_page, src_off_in_page, cur);
2695 EXPORT_SYMBOL(memcpy_extent_buffer);
2697 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2698 unsigned long src_offset, unsigned long len)
2701 size_t dst_off_in_page;
2702 size_t src_off_in_page;
2703 unsigned long dst_end = dst_offset + len - 1;
2704 unsigned long src_end = src_offset + len - 1;
2705 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2706 unsigned long dst_i;
2707 unsigned long src_i;
2709 if (src_offset + len > dst->len) {
2710 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2711 src_offset, len, dst->len);
2714 if (dst_offset + len > dst->len) {
2715 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2716 dst_offset, len, dst->len);
2719 if (dst_offset < src_offset) {
2720 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
2724 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
2725 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
2727 dst_off_in_page = (start_offset + dst_end) &
2728 ((unsigned long)PAGE_CACHE_SIZE - 1);
2729 src_off_in_page = (start_offset + src_end) &
2730 ((unsigned long)PAGE_CACHE_SIZE - 1);
2732 cur = min_t(unsigned long, len, src_off_in_page + 1);
2733 cur = min(cur, dst_off_in_page + 1);
2734 move_pages(extent_buffer_page(dst, dst_i),
2735 extent_buffer_page(dst, src_i),
2736 dst_off_in_page - cur + 1,
2737 src_off_in_page - cur + 1, cur);
2744 EXPORT_SYMBOL(memmove_extent_buffer);