1 #include <linux/bitops.h>
2 #include <linux/slab.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_map.h"
17 /* temporary define until extent_map moves out of btrfs */
18 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
19 unsigned long extra_flags,
20 void (*ctor)(void *, struct kmem_cache *,
23 static struct kmem_cache *extent_map_cache;
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
27 static LIST_HEAD(buffers);
28 static LIST_HEAD(states);
30 static DEFINE_SPINLOCK(state_lock);
31 #define BUFFER_LRU_MAX 64
37 struct rb_node rb_node;
40 struct extent_page_data {
42 struct extent_map_tree *tree;
43 get_extent_t *get_extent;
46 int __init extent_map_init(void)
48 extent_map_cache = btrfs_cache_create("extent_map",
49 sizeof(struct extent_map), 0,
51 if (!extent_map_cache)
53 extent_state_cache = btrfs_cache_create("extent_state",
54 sizeof(struct extent_state), 0,
56 if (!extent_state_cache)
58 extent_buffer_cache = btrfs_cache_create("extent_buffers",
59 sizeof(struct extent_buffer), 0,
61 if (!extent_buffer_cache)
62 goto free_state_cache;
66 kmem_cache_destroy(extent_state_cache);
68 kmem_cache_destroy(extent_map_cache);
72 void extent_map_exit(void)
74 struct extent_state *state;
76 while (!list_empty(&states)) {
77 state = list_entry(states.next, struct extent_state, list);
78 printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
79 list_del(&state->list);
80 kmem_cache_free(extent_state_cache, state);
85 kmem_cache_destroy(extent_map_cache);
86 if (extent_state_cache)
87 kmem_cache_destroy(extent_state_cache);
88 if (extent_buffer_cache)
89 kmem_cache_destroy(extent_buffer_cache);
92 void extent_map_tree_init(struct extent_map_tree *tree,
93 struct address_space *mapping, gfp_t mask)
95 tree->map.rb_node = NULL;
96 tree->state.rb_node = NULL;
98 tree->dirty_bytes = 0;
99 rwlock_init(&tree->lock);
100 spin_lock_init(&tree->lru_lock);
101 tree->mapping = mapping;
102 INIT_LIST_HEAD(&tree->buffer_lru);
105 EXPORT_SYMBOL(extent_map_tree_init);
107 void extent_map_tree_empty_lru(struct extent_map_tree *tree)
109 struct extent_buffer *eb;
110 while(!list_empty(&tree->buffer_lru)) {
111 eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
113 list_del_init(&eb->lru);
114 free_extent_buffer(eb);
117 EXPORT_SYMBOL(extent_map_tree_empty_lru);
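/*
 * Illustrative sketch, not part of the original file: the usual pairing of
 * the two helpers above.  A tree is initialized once against an inode's
 * mapping, and the cached extent_buffer LRU is drained again before the
 * structure goes away.  The helper names here are hypothetical.
 */
static inline void example_extent_tree_setup(struct extent_map_tree *tree,
					     struct address_space *mapping)
{
	extent_map_tree_init(tree, mapping, GFP_NOFS);
}

static inline void example_extent_tree_teardown(struct extent_map_tree *tree)
{
	extent_map_tree_empty_lru(tree);
}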
119 struct extent_map *alloc_extent_map(gfp_t mask)
121 struct extent_map *em;
122 em = kmem_cache_alloc(extent_map_cache, mask);
123 if (!em || IS_ERR(em))
126 atomic_set(&em->refs, 1);
129 EXPORT_SYMBOL(alloc_extent_map);
131 void free_extent_map(struct extent_map *em)
135 if (atomic_dec_and_test(&em->refs)) {
136 WARN_ON(em->in_tree);
137 kmem_cache_free(extent_map_cache, em);
140 EXPORT_SYMBOL(free_extent_map);
143 struct extent_state *alloc_extent_state(gfp_t mask)
145 struct extent_state *state;
148 state = kmem_cache_alloc(extent_state_cache, mask);
149 if (!state || IS_ERR(state))
155 spin_lock_irqsave(&state_lock, flags);
156 list_add(&state->list, &states);
157 spin_unlock_irqrestore(&state_lock, flags);
159 atomic_set(&state->refs, 1);
160 init_waitqueue_head(&state->wq);
163 EXPORT_SYMBOL(alloc_extent_state);
165 void free_extent_state(struct extent_state *state)
170 if (atomic_dec_and_test(&state->refs)) {
171 WARN_ON(state->in_tree);
172 spin_lock_irqsave(&state_lock, flags);
173 list_del(&state->list);
174 spin_unlock_irqrestore(&state_lock, flags);
175 kmem_cache_free(extent_state_cache, state);
178 EXPORT_SYMBOL(free_extent_state);
180 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
181 struct rb_node *node)
183 struct rb_node ** p = &root->rb_node;
184 struct rb_node * parent = NULL;
185 struct tree_entry *entry;
189 entry = rb_entry(parent, struct tree_entry, rb_node);
191 if (offset < entry->start)
193 else if (offset > entry->end)
199 entry = rb_entry(node, struct tree_entry, rb_node);
201 rb_link_node(node, parent, p);
202 rb_insert_color(node, root);
206 static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
207 struct rb_node **prev_ret)
209 struct rb_node * n = root->rb_node;
210 struct rb_node *prev = NULL;
211 struct tree_entry *entry;
212 struct tree_entry *prev_entry = NULL;
215 entry = rb_entry(n, struct tree_entry, rb_node);
219 if (offset < entry->start)
221 else if (offset > entry->end)
228 while(prev && offset > prev_entry->end) {
229 prev = rb_next(prev);
230 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
236 static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
238 struct rb_node *prev;
240 ret = __tree_search(root, offset, &prev);
246 static int tree_delete(struct rb_root *root, u64 offset)
248 struct rb_node *node;
249 struct tree_entry *entry;
251 node = __tree_search(root, offset, NULL);
254 entry = rb_entry(node, struct tree_entry, rb_node);
256 rb_erase(node, root);
261 * add_extent_mapping tries a simple backward merge with existing
262 * mappings. The extent_map struct passed in will be inserted into
263 * the tree directly (no copies made, just a reference taken).
265 int add_extent_mapping(struct extent_map_tree *tree,
266 struct extent_map *em)
269 struct extent_map *prev = NULL;
272 write_lock_irq(&tree->lock);
273 rb = tree_insert(&tree->map, em->end, &em->rb_node);
275 prev = rb_entry(rb, struct extent_map, rb_node);
276 printk("found extent map %Lu %Lu on insert of %Lu %Lu\n", prev->start, prev->end, em->start, em->end);
280 atomic_inc(&em->refs);
281 if (em->start != 0) {
282 rb = rb_prev(&em->rb_node);
284 prev = rb_entry(rb, struct extent_map, rb_node);
285 if (prev && prev->end + 1 == em->start &&
286 ((em->block_start == EXTENT_MAP_HOLE &&
287 prev->block_start == EXTENT_MAP_HOLE) ||
288 (em->block_start == EXTENT_MAP_INLINE &&
289 prev->block_start == EXTENT_MAP_INLINE) ||
290 (em->block_start == EXTENT_MAP_DELALLOC &&
291 prev->block_start == EXTENT_MAP_DELALLOC) ||
292 (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
293 em->block_start == prev->block_end + 1))) {
294 em->start = prev->start;
295 em->block_start = prev->block_start;
296 rb_erase(&prev->rb_node, &tree->map);
298 free_extent_map(prev);
302 write_unlock_irq(&tree->lock);
305 EXPORT_SYMBOL(add_extent_mapping);
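/*
 * Illustrative sketch, not part of the original file: a typical insertion.
 * add_extent_mapping() takes its own reference on success, so the caller's
 * reference from alloc_extent_map() is still the caller's to drop.  The
 * helper name and field values are hypothetical; real callers usually also
 * fill em->bdev.
 */
static inline int example_insert_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end, u64 block_start)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map(GFP_NOFS);
	if (!em || IS_ERR(em))
		return -ENOMEM;
	em->start = start;
	em->end = end;
	em->block_start = block_start;
	em->block_end = block_start + (end - start);
	ret = add_extent_mapping(tree, em);
	/* drop the allocation reference; the tree now holds its own */
	free_extent_map(em);
	return ret;
}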
308 * lookup_extent_mapping returns the first extent_map struct in the
309 * tree that intersects the [start, end] (inclusive) range. There may
310 * be additional objects in the tree that intersect, so check the object
311 * returned carefully to make sure you don't need additional lookups.
313 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
316 struct extent_map *em;
317 struct rb_node *rb_node;
319 read_lock_irq(&tree->lock);
320 rb_node = tree_search(&tree->map, start);
325 if (IS_ERR(rb_node)) {
326 em = ERR_PTR(PTR_ERR(rb_node));
329 em = rb_entry(rb_node, struct extent_map, rb_node);
330 if (em->end < start || em->start > end) {
334 atomic_inc(&em->refs);
336 read_unlock_irq(&tree->lock);
339 EXPORT_SYMBOL(lookup_extent_mapping);
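/*
 * Illustrative sketch, not part of the original file: lookup_extent_mapping()
 * takes a reference on the extent_map it returns, so every successful lookup
 * must be balanced with free_extent_map().  The helper name is hypothetical.
 */
static inline int example_range_is_mapped(struct extent_map_tree *tree,
					  u64 start, u64 end)
{
	struct extent_map *em;

	em = lookup_extent_mapping(tree, start, end);
	if (!em || IS_ERR(em))
		return 0;
	/* [em->start, em->end] may cover only part of [start, end] */
	free_extent_map(em);
	return 1;
}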
342 * removes an extent_map struct from the tree. No reference counts are
343 * dropped, and no checks are done to see if the range is in use
345 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
349 write_lock_irq(&tree->lock);
350 ret = tree_delete(&tree->map, em->end);
351 write_unlock_irq(&tree->lock);
354 EXPORT_SYMBOL(remove_extent_mapping);
357 * utility function to look for merge candidates inside a given range.
358 * Any extents with matching state are merged together into a single
359 * extent in the tree. Extents with EXTENT_IOBITS set in their state field
360 * are not merged because the end_io handlers need to be able to do
361 * operations on them without sleeping (or doing allocations/splits).
363 * This should be called with the tree lock held.
365 static int merge_state(struct extent_map_tree *tree,
366 struct extent_state *state)
368 struct extent_state *other;
369 struct rb_node *other_node;
371 if (state->state & EXTENT_IOBITS)
374 other_node = rb_prev(&state->rb_node);
376 other = rb_entry(other_node, struct extent_state, rb_node);
377 if (other->end == state->start - 1 &&
378 other->state == state->state) {
379 state->start = other->start;
381 rb_erase(&other->rb_node, &tree->state);
382 free_extent_state(other);
385 other_node = rb_next(&state->rb_node);
387 other = rb_entry(other_node, struct extent_state, rb_node);
388 if (other->start == state->end + 1 &&
389 other->state == state->state) {
390 other->start = state->start;
392 rb_erase(&state->rb_node, &tree->state);
393 free_extent_state(state);
400 * insert an extent_state struct into the tree. 'bits' are set on the
401 * struct before it is inserted.
403 * This may return -EEXIST if the extent is already there, in which case the
404 * state struct is freed.
406 * The tree lock is not taken internally. This is a utility function and
407 * probably isn't what you want to call (see set/clear_extent_bit).
409 static int insert_state(struct extent_map_tree *tree,
410 struct extent_state *state, u64 start, u64 end,
413 struct rb_node *node;
416 printk("end < start %Lu %Lu\n", end, start);
419 if (bits & EXTENT_DIRTY)
420 tree->dirty_bytes += end - start + 1;
421 state->state |= bits;
422 state->start = start;
424 node = tree_insert(&tree->state, end, &state->rb_node);
426 struct extent_state *found;
427 found = rb_entry(node, struct extent_state, rb_node);
428 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
429 free_extent_state(state);
432 merge_state(tree, state);
437 * split a given extent state struct in two, inserting the preallocated
438 * struct 'prealloc' as the newly created second half. 'split' indicates an
439 * offset inside 'orig' where it should be split.
442 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
443 * are two extent state structs in the tree:
444 * prealloc: [orig->start, split - 1]
445 * orig: [ split, orig->end ]
447 * The tree locks are not taken by this function. They need to be held
450 static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
451 struct extent_state *prealloc, u64 split)
453 struct rb_node *node;
454 prealloc->start = orig->start;
455 prealloc->end = split - 1;
456 prealloc->state = orig->state;
459 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
461 struct extent_state *found;
462 found = rb_entry(node, struct extent_state, rb_node);
463 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
464 free_extent_state(prealloc);
471 * utility function to clear some bits in an extent state struct.
472 * It will optionally wake up anyone waiting on this state (wake == 1), or
473 * forcibly remove the state from the tree (delete == 1).
475 * If no bits are set on the state struct after clearing things, the
476 * struct is freed and removed from the tree
478 static int clear_state_bit(struct extent_map_tree *tree,
479 struct extent_state *state, int bits, int wake,
482 int ret = state->state & bits;
484 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
485 u64 range = state->end - state->start + 1;
486 WARN_ON(range > tree->dirty_bytes);
487 tree->dirty_bytes -= range;
489 state->state &= ~bits;
492 if (delete || state->state == 0) {
493 if (state->in_tree) {
494 rb_erase(&state->rb_node, &tree->state);
496 free_extent_state(state);
501 merge_state(tree, state);
507 * clear some bits on a range in the tree. This may require splitting
508 * or inserting elements in the tree, so the gfp mask is used to
509 * indicate which allocations or sleeping are allowed.
511 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
512 * the given range from the tree regardless of state (i.e. for truncate).
514 * the range [start, end] is inclusive.
516 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
517 * bits were already set, or zero if none of the bits were already set.
519 int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
520 int bits, int wake, int delete, gfp_t mask)
522 struct extent_state *state;
523 struct extent_state *prealloc = NULL;
524 struct rb_node *node;
530 if (!prealloc && (mask & __GFP_WAIT)) {
531 prealloc = alloc_extent_state(mask);
536 write_lock_irqsave(&tree->lock, flags);
538 * this search will find the extents that end after
541 node = tree_search(&tree->state, start);
544 state = rb_entry(node, struct extent_state, rb_node);
545 if (state->start > end)
547 WARN_ON(state->end < start);
550 * | ---- desired range ---- |
552 * | ------------- state -------------- |
554 * We need to split the extent we found, and may flip
555 * bits on second half.
557 * If the extent we found extends past our range, we
558 * just split and search again. It'll get split again
559 * the next time though.
561 * If the extent we found is inside our range, we clear
562 * the desired bit on it.
565 if (state->start < start) {
566 err = split_state(tree, state, prealloc, start);
567 BUG_ON(err == -EEXIST);
571 if (state->end <= end) {
572 start = state->end + 1;
573 set |= clear_state_bit(tree, state, bits,
576 start = state->start;
581 * | ---- desired range ---- |
583 * We need to split the extent, and clear the bit
586 if (state->start <= end && state->end > end) {
587 err = split_state(tree, state, prealloc, end + 1);
588 BUG_ON(err == -EEXIST);
592 set |= clear_state_bit(tree, prealloc, bits,
598 start = state->end + 1;
599 set |= clear_state_bit(tree, state, bits, wake, delete);
603 write_unlock_irqrestore(&tree->lock, flags);
605 free_extent_state(prealloc);
612 write_unlock_irqrestore(&tree->lock, flags);
613 if (mask & __GFP_WAIT)
617 EXPORT_SYMBOL(clear_extent_bit);
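/*
 * Illustrative sketch, not part of the original file: a truncate-style
 * caller that wants the state records gone no matter what bits they carry
 * passes delete == 1 (and wake == 1 to kick any sleepers), much like
 * extent_invalidatepage() below.  The helper name is hypothetical.
 */
static inline int example_forget_range(struct extent_map_tree *tree,
				       u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
				1, 1, GFP_NOFS);
}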
619 static int wait_on_state(struct extent_map_tree *tree,
620 struct extent_state *state)
623 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
624 read_unlock_irq(&tree->lock);
626 read_lock_irq(&tree->lock);
627 finish_wait(&state->wq, &wait);
632 * waits for one or more bits to clear on a range in the state tree.
633 * The range [start, end] is inclusive.
634 * The tree lock is taken by this function
636 int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
638 struct extent_state *state;
639 struct rb_node *node;
641 read_lock_irq(&tree->lock);
645 * this search will find all the extents that end after
648 node = tree_search(&tree->state, start);
652 state = rb_entry(node, struct extent_state, rb_node);
654 if (state->start > end)
657 if (state->state & bits) {
658 start = state->start;
659 atomic_inc(&state->refs);
660 wait_on_state(tree, state);
661 free_extent_state(state);
664 start = state->end + 1;
669 if (need_resched()) {
670 read_unlock_irq(&tree->lock);
672 read_lock_irq(&tree->lock);
676 read_unlock_irq(&tree->lock);
679 EXPORT_SYMBOL(wait_extent_bit);
681 static void set_state_bits(struct extent_map_tree *tree,
682 struct extent_state *state,
685 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
686 u64 range = state->end - state->start + 1;
687 tree->dirty_bytes += range;
689 state->state |= bits;
693 * set some bits on a range in the tree. This may require allocations
694 * or sleeping, so the gfp mask is used to indicate what is allowed.
696 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
697 * range already has the desired bits set. The start of the existing
698 * range is returned in failed_start in this case.
700 * [start, end] is inclusive
701 * This takes the tree lock.
703 int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
704 int exclusive, u64 *failed_start, gfp_t mask)
706 struct extent_state *state;
707 struct extent_state *prealloc = NULL;
708 struct rb_node *node;
715 if (!prealloc && (mask & __GFP_WAIT)) {
716 prealloc = alloc_extent_state(mask);
721 write_lock_irqsave(&tree->lock, flags);
723 * this search will find all the extents that end after
726 node = tree_search(&tree->state, start);
728 err = insert_state(tree, prealloc, start, end, bits);
730 BUG_ON(err == -EEXIST);
734 state = rb_entry(node, struct extent_state, rb_node);
735 last_start = state->start;
736 last_end = state->end;
739 * | ---- desired range ---- |
742 * Just lock what we found and keep going
744 if (state->start == start && state->end <= end) {
745 set = state->state & bits;
746 if (set && exclusive) {
747 *failed_start = state->start;
751 set_state_bits(tree, state, bits);
752 start = state->end + 1;
753 merge_state(tree, state);
758 * | ---- desired range ---- |
761 * | ------------- state -------------- |
763 * We need to split the extent we found, and may flip bits on
766 * If the extent we found extends past our
767 * range, we just split and search again. It'll get split
768 * again the next time though.
770 * If the extent we found is inside our range, we set the
773 if (state->start < start) {
774 set = state->state & bits;
775 if (exclusive && set) {
776 *failed_start = start;
780 err = split_state(tree, state, prealloc, start);
781 BUG_ON(err == -EEXIST);
785 if (state->end <= end) {
786 set_state_bits(tree, state, bits);
787 start = state->end + 1;
788 merge_state(tree, state);
790 start = state->start;
795 * | ---- desired range ---- |
796 * | state | or | state |
798 * There's a hole, we need to insert something in it and
799 * ignore the extent we found.
801 if (state->start > start) {
803 if (end < last_start)
806 this_end = last_start - 1;
807 err = insert_state(tree, prealloc, start, this_end,
810 BUG_ON(err == -EEXIST);
813 start = this_end + 1;
817 * | ---- desired range ---- |
819 * We need to split the extent, and set the bit
822 if (state->start <= end && state->end > end) {
823 set = state->state & bits;
824 if (exclusive && set) {
825 *failed_start = start;
829 err = split_state(tree, state, prealloc, end + 1);
830 BUG_ON(err == -EEXIST);
832 set_state_bits(tree, prealloc, bits);
833 merge_state(tree, prealloc);
841 write_unlock_irqrestore(&tree->lock, flags);
843 free_extent_state(prealloc);
850 write_unlock_irqrestore(&tree->lock, flags);
851 if (mask & __GFP_WAIT)
855 EXPORT_SYMBOL(set_extent_bit);
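/*
 * Illustrative sketch, not part of the original file: the exclusive mode of
 * set_extent_bit().  With exclusive == 1 the call fails with -EEXIST if any
 * part of [start, end] already carries the bit, and failed_start reports
 * where the collision begins; bits set before the collision are backed out
 * here so the whole attempt is all-or-nothing.  lock_extent() below is built
 * on the same contract.  The helper name is hypothetical.
 */
static inline int example_try_lock_extent(struct extent_map_tree *tree,
					  u64 start, u64 end)
{
	u64 failed_start;
	int err;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
			     &failed_start, GFP_NOFS);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, GFP_NOFS);
		return 0;
	}
	return 1;
}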
857 /* wrappers around set/clear extent bit */
858 int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
861 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
864 EXPORT_SYMBOL(set_extent_dirty);
866 int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
867 int bits, gfp_t mask)
869 return set_extent_bit(tree, start, end, bits, 0, NULL,
872 EXPORT_SYMBOL(set_extent_bits);
874 int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
875 int bits, gfp_t mask)
877 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
879 EXPORT_SYMBOL(clear_extent_bits);
881 int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
884 return set_extent_bit(tree, start, end,
885 EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
888 EXPORT_SYMBOL(set_extent_delalloc);
890 int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
893 return clear_extent_bit(tree, start, end,
894 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
896 EXPORT_SYMBOL(clear_extent_dirty);
898 int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
901 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
904 EXPORT_SYMBOL(set_extent_new);
906 int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
909 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
911 EXPORT_SYMBOL(clear_extent_new);
913 int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
916 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
919 EXPORT_SYMBOL(set_extent_uptodate);
921 int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
924 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
926 EXPORT_SYMBOL(clear_extent_uptodate);
928 int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
931 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
934 EXPORT_SYMBOL(set_extent_writeback);
936 int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
939 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
941 EXPORT_SYMBOL(clear_extent_writeback);
943 int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
945 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
947 EXPORT_SYMBOL(wait_on_extent_writeback);
950 * locks a range in ascending order, waiting for any locked regions
951 * it hits on the way. [start,end] are inclusive, and this will sleep.
953 int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
958 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
959 &failed_start, mask);
960 if (err == -EEXIST && (mask & __GFP_WAIT)) {
961 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
962 start = failed_start;
966 WARN_ON(start > end);
970 EXPORT_SYMBOL(lock_extent);
972 int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
975 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
977 EXPORT_SYMBOL(unlock_extent);
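/*
 * Illustrative sketch, not part of the original file: the lock/unlock pair
 * as the readpage/writepage paths below use it, covering the byte range of
 * a single page.  The helper name is hypothetical.
 */
static inline void example_with_page_range_locked(struct extent_map_tree *tree,
						  struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	lock_extent(tree, start, end, GFP_NOFS);
	/* ... operate on [start, end] with the extent range locked ... */
	unlock_extent(tree, start, end, GFP_NOFS);
}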
980 * helper function to set pages and extents in the tree dirty
982 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
984 unsigned long index = start >> PAGE_CACHE_SHIFT;
985 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
988 while (index <= end_index) {
989 page = find_get_page(tree->mapping, index);
991 __set_page_dirty_nobuffers(page);
992 page_cache_release(page);
995 set_extent_dirty(tree, start, end, GFP_NOFS);
998 EXPORT_SYMBOL(set_range_dirty);
1001 * helper function to set both pages and extents in the tree writeback
1003 int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
1005 unsigned long index = start >> PAGE_CACHE_SHIFT;
1006 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1009 while (index <= end_index) {
1010 page = find_get_page(tree->mapping, index);
1012 set_page_writeback(page);
1013 page_cache_release(page);
1016 set_extent_writeback(tree, start, end, GFP_NOFS);
1019 EXPORT_SYMBOL(set_range_writeback);
1021 int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
1022 u64 *start_ret, u64 *end_ret, int bits)
1024 struct rb_node *node;
1025 struct extent_state *state;
1028 read_lock_irq(&tree->lock);
1030 * this search will find all the extents that end after
1033 node = tree_search(&tree->state, start);
1034 if (!node || IS_ERR(node)) {
1039 state = rb_entry(node, struct extent_state, rb_node);
1040 if (state->end >= start && (state->state & bits)) {
1041 *start_ret = state->start;
1042 *end_ret = state->end;
1046 node = rb_next(node);
1051 read_unlock_irq(&tree->lock);
1054 EXPORT_SYMBOL(find_first_extent_bit);
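/*
 * Illustrative sketch, not part of the original file: walking every range
 * that carries a given bit by searching again from just past the previous
 * hit.  find_first_extent_bit() returns 0 when it found something.  The
 * helper name is hypothetical.
 */
static inline u64 example_count_ranges_with_bit(struct extent_map_tree *tree,
						int bits)
{
	u64 start = 0;
	u64 found_start;
	u64 found_end;
	u64 nr = 0;

	while (find_first_extent_bit(tree, start, &found_start,
				     &found_end, bits) == 0) {
		nr++;
		if (found_end == (u64)-1)
			break;
		start = found_end + 1;
	}
	return nr;
}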
1056 u64 find_lock_delalloc_range(struct extent_map_tree *tree,
1057 u64 *start, u64 *end, u64 max_bytes)
1059 struct rb_node *node;
1060 struct extent_state *state;
1061 u64 cur_start = *start;
1063 u64 total_bytes = 0;
1065 write_lock_irq(&tree->lock);
1067 * this search will find all the extents that end after
1071 node = tree_search(&tree->state, cur_start);
1072 if (!node || IS_ERR(node)) {
1077 state = rb_entry(node, struct extent_state, rb_node);
1078 if (found && state->start != cur_start) {
1081 if (!(state->state & EXTENT_DELALLOC)) {
1085 struct extent_state *prev_state;
1086 struct rb_node *prev_node = node;
1088 prev_node = rb_prev(prev_node);
1091 prev_state = rb_entry(prev_node,
1092 struct extent_state,
1094 if (!(prev_state->state & EXTENT_DELALLOC))
1100 if (state->state & EXTENT_LOCKED) {
1102 atomic_inc(&state->refs);
1103 prepare_to_wait(&state->wq, &wait,
1104 TASK_UNINTERRUPTIBLE);
1105 write_unlock_irq(&tree->lock);
1107 write_lock_irq(&tree->lock);
1108 finish_wait(&state->wq, &wait);
1109 free_extent_state(state);
1112 state->state |= EXTENT_LOCKED;
1114 *start = state->start;
1117 cur_start = state->end + 1;
1118 node = rb_next(node);
1121 total_bytes += state->end - state->start + 1;
1122 if (total_bytes >= max_bytes)
1126 write_unlock_irq(&tree->lock);
1130 u64 count_range_bits(struct extent_map_tree *tree,
1131 u64 *start, u64 max_bytes, unsigned long bits)
1133 struct rb_node *node;
1134 struct extent_state *state;
1135 u64 cur_start = *start;
1136 u64 total_bytes = 0;
1139 write_lock_irq(&tree->lock);
1140 if (bits == EXTENT_DIRTY) {
1142 total_bytes = tree->dirty_bytes;
1146 * this search will find all the extents that end after
1149 node = tree_search(&tree->state, cur_start);
1150 if (!node || IS_ERR(node)) {
1155 state = rb_entry(node, struct extent_state, rb_node);
1156 if ((state->state & bits)) {
1157 total_bytes += state->end - state->start + 1;
1158 if (total_bytes >= max_bytes)
1161 *start = state->start;
1165 node = rb_next(node);
1170 write_unlock_irq(&tree->lock);
1175 * helper function to lock both pages and extents in the tree.
1176 * pages must be locked first.
1178 int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
1180 unsigned long index = start >> PAGE_CACHE_SHIFT;
1181 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1185 while (index <= end_index) {
1186 page = grab_cache_page(tree->mapping, index);
1192 err = PTR_ERR(page);
1197 lock_extent(tree, start, end, GFP_NOFS);
1202 * we failed above in getting the page at 'index', so we undo here
1203 * up to but not including the page at 'index'
1206 index = start >> PAGE_CACHE_SHIFT;
1207 while (index < end_index) {
1208 page = find_get_page(tree->mapping, index);
1210 page_cache_release(page);
1215 EXPORT_SYMBOL(lock_range);
1218 * helper function to unlock both pages and extents in the tree.
1220 int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
1222 unsigned long index = start >> PAGE_CACHE_SHIFT;
1223 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1226 while (index <= end_index) {
1227 page = find_get_page(tree->mapping, index);
1229 page_cache_release(page);
1232 unlock_extent(tree, start, end, GFP_NOFS);
1235 EXPORT_SYMBOL(unlock_range);
1237 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
1239 struct rb_node *node;
1240 struct extent_state *state;
1243 write_lock_irq(&tree->lock);
1245 * this search will find all the extents that end after
1248 node = tree_search(&tree->state, start);
1249 if (!node || IS_ERR(node)) {
1253 state = rb_entry(node, struct extent_state, rb_node);
1254 if (state->start != start) {
1258 state->private = private;
1260 write_unlock_irq(&tree->lock);
1264 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
1266 struct rb_node *node;
1267 struct extent_state *state;
1270 read_lock_irq(&tree->lock);
1272 * this search will find all the extents that end after
1275 node = tree_search(&tree->state, start);
1276 if (!node || IS_ERR(node)) {
1280 state = rb_entry(node, struct extent_state, rb_node);
1281 if (state->start != start) {
1285 *private = state->private;
1287 read_unlock_irq(&tree->lock);
1292 * searches a range in the state tree for a given mask.
1293 * If 'filled' == 1, this returns 1 only if every extent in the range
1294 * has the bits set. Otherwise, 1 is returned if any bit in the
1295 * range is found set.
1297 int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
1298 int bits, int filled)
1300 struct extent_state *state = NULL;
1301 struct rb_node *node;
1304 read_lock_irq(&tree->lock);
1305 node = tree_search(&tree->state, start);
1306 while (node && start <= end) {
1307 state = rb_entry(node, struct extent_state, rb_node);
1309 if (filled && state->start > start) {
1314 if (state->start > end)
1317 if (state->state & bits) {
1321 } else if (filled) {
1325 start = state->end + 1;
1328 node = rb_next(node);
1330 read_unlock_irq(&tree->lock);
1333 EXPORT_SYMBOL(test_range_bit);
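/*
 * Illustrative sketch, not part of the original file: the two 'filled'
 * modes of test_range_bit().  The helper name is hypothetical.
 */
static inline void example_test_range_modes(struct extent_map_tree *tree,
					    u64 start, u64 end)
{
	/* 1 only if all of [start, end] is covered by EXTENT_UPTODATE */
	int all_uptodate = test_range_bit(tree, start, end,
					  EXTENT_UPTODATE, 1);
	/* 1 if any part of [start, end] is locked */
	int any_locked = test_range_bit(tree, start, end,
					EXTENT_LOCKED, 0);

	(void)all_uptodate;
	(void)any_locked;
}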
1336 * helper function to set a given page up to date if all the
1337 * extents in the tree for that page are up to date
1339 static int check_page_uptodate(struct extent_map_tree *tree,
1342 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1343 u64 end = start + PAGE_CACHE_SIZE - 1;
1344 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1345 SetPageUptodate(page);
1350 * helper function to unlock a page if all the extents in the tree
1351 * for that page are unlocked
1353 static int check_page_locked(struct extent_map_tree *tree,
1356 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1357 u64 end = start + PAGE_CACHE_SIZE - 1;
1358 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1364 * helper function to end page writeback if all the extents
1365 * in the tree for that page are done with writeback
1367 static int check_page_writeback(struct extent_map_tree *tree,
1370 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1371 u64 end = start + PAGE_CACHE_SIZE - 1;
1372 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1373 end_page_writeback(page);
1377 /* lots and lots of room for performance fixes in the end_bio funcs */
1380 * after a writepage IO is done, we need to:
1381 * clear the uptodate bits on error
1382 * clear the writeback bits in the extent tree for this IO
1383 * end_page_writeback if the page has no more pending IO
1385 * Scheduling is not allowed, so the extent state tree is expected
1386 * to have one and only one object corresponding to this IO.
1388 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1389 static void end_bio_extent_writepage(struct bio *bio, int err)
1391 static int end_bio_extent_writepage(struct bio *bio,
1392 unsigned int bytes_done, int err)
1395 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1396 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1397 struct extent_map_tree *tree = bio->bi_private;
1402 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1408 struct page *page = bvec->bv_page;
1409 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1411 end = start + bvec->bv_len - 1;
1413 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1418 if (--bvec >= bio->bi_io_vec)
1419 prefetchw(&bvec->bv_page->flags);
1422 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1423 ClearPageUptodate(page);
1426 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1429 end_page_writeback(page);
1431 check_page_writeback(tree, page);
1432 if (tree->ops && tree->ops->writepage_end_io_hook)
1433 tree->ops->writepage_end_io_hook(page, start, end);
1434 } while (bvec >= bio->bi_io_vec);
1437 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1443 * after a readpage IO is done, we need to:
1444 * clear the uptodate bits on error
1445 * set the uptodate bits if things worked
1446 * set the page up to date if all extents in the tree are uptodate
1447 * clear the lock bit in the extent tree
1448 * unlock the page if there are no other extents locked for it
1450 * Scheduling is not allowed, so the extent state tree is expected
1451 * to have one and only one object corresponding to this IO.
1453 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1454 static void end_bio_extent_readpage(struct bio *bio, int err)
1456 static int end_bio_extent_readpage(struct bio *bio,
1457 unsigned int bytes_done, int err)
1460 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1461 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1462 struct extent_map_tree *tree = bio->bi_private;
1468 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1474 struct page *page = bvec->bv_page;
1475 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1477 end = start + bvec->bv_len - 1;
1479 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1484 if (--bvec >= bio->bi_io_vec)
1485 prefetchw(&bvec->bv_page->flags);
1487 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1488 ret = tree->ops->readpage_end_io_hook(page, start, end);
1493 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1495 SetPageUptodate(page);
1497 check_page_uptodate(tree, page);
1499 ClearPageUptodate(page);
1503 unlock_extent(tree, start, end, GFP_ATOMIC);
1508 check_page_locked(tree, page);
1509 } while (bvec >= bio->bi_io_vec);
1512 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1518 * IO done from prepare_write is pretty simple, we just unlock
1519 * the structs in the extent tree when done, and set the uptodate bits
1522 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1523 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1525 static int end_bio_extent_preparewrite(struct bio *bio,
1526 unsigned int bytes_done, int err)
1529 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1530 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1531 struct extent_map_tree *tree = bio->bi_private;
1535 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1541 struct page *page = bvec->bv_page;
1542 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1544 end = start + bvec->bv_len - 1;
1546 if (--bvec >= bio->bi_io_vec)
1547 prefetchw(&bvec->bv_page->flags);
1550 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1552 ClearPageUptodate(page);
1556 unlock_extent(tree, start, end, GFP_ATOMIC);
1558 } while (bvec >= bio->bi_io_vec);
1561 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1567 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1572 bio = bio_alloc(gfp_flags, nr_vecs);
1574 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1575 while (!bio && (nr_vecs /= 2))
1576 bio = bio_alloc(gfp_flags, nr_vecs);
1580 bio->bi_bdev = bdev;
1581 bio->bi_sector = first_sector;
1586 static int submit_one_bio(int rw, struct bio *bio)
1593 maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1594 if (maxsector < bio->bi_sector) {
1595 printk("sector too large max %Lu got %llu\n", maxsector,
1596 (unsigned long long)bio->bi_sector);
1600 submit_bio(rw, bio);
1601 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1607 static int submit_extent_page(int rw, struct extent_map_tree *tree,
1608 struct page *page, sector_t sector,
1609 size_t size, unsigned long offset,
1610 struct block_device *bdev,
1611 struct bio **bio_ret,
1612 unsigned long max_pages,
1613 bio_end_io_t end_io_func)
1619 if (bio_ret && *bio_ret) {
1621 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1622 bio_add_page(bio, page, size, offset) < size) {
1623 ret = submit_one_bio(rw, bio);
1629 nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
1630 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1632 printk("failed to allocate bio nr %d\n", nr);
1634 bio_add_page(bio, page, size, offset);
1635 bio->bi_end_io = end_io_func;
1636 bio->bi_private = tree;
1640 ret = submit_one_bio(rw, bio);
1646 void set_page_extent_mapped(struct page *page)
1648 if (!PagePrivate(page)) {
1649 SetPagePrivate(page);
1650 WARN_ON(!page->mapping->a_ops->invalidatepage);
1651 set_page_private(page, EXTENT_PAGE_PRIVATE);
1652 page_cache_get(page);
1657 * basic readpage implementation. Locked extent state structs are inserted
1658 * into the tree and removed again when the IO is done (by the end_io
1661 static int __extent_read_full_page(struct extent_map_tree *tree,
1663 get_extent_t *get_extent,
1666 struct inode *inode = page->mapping->host;
1667 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1668 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1672 u64 last_byte = i_size_read(inode);
1676 struct extent_map *em;
1677 struct block_device *bdev;
1680 size_t page_offset = 0;
1682 size_t blocksize = inode->i_sb->s_blocksize;
1684 set_page_extent_mapped(page);
1687 lock_extent(tree, start, end, GFP_NOFS);
1689 while (cur <= end) {
1690 if (cur >= last_byte) {
1692 iosize = PAGE_CACHE_SIZE - page_offset;
1693 userpage = kmap_atomic(page, KM_USER0);
1694 memset(userpage + page_offset, 0, iosize);
1695 flush_dcache_page(page);
1696 kunmap_atomic(userpage, KM_USER0);
1697 set_extent_uptodate(tree, cur, cur + iosize - 1,
1699 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1702 em = get_extent(inode, page, page_offset, cur, end, 0);
1703 if (IS_ERR(em) || !em) {
1705 unlock_extent(tree, cur, end, GFP_NOFS);
1709 extent_offset = cur - em->start;
1710 BUG_ON(em->end < cur);
1713 iosize = min(em->end - cur, end - cur) + 1;
1714 cur_end = min(em->end, end);
1715 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1716 sector = (em->block_start + extent_offset) >> 9;
1718 block_start = em->block_start;
1719 free_extent_map(em);
1722 /* we've found a hole, just zero and go on */
1723 if (block_start == EXTENT_MAP_HOLE) {
1725 userpage = kmap_atomic(page, KM_USER0);
1726 memset(userpage + page_offset, 0, iosize);
1727 flush_dcache_page(page);
1728 kunmap_atomic(userpage, KM_USER0);
1730 set_extent_uptodate(tree, cur, cur + iosize - 1,
1732 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1734 page_offset += iosize;
1737 /* the get_extent function already copied into the page */
1738 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1739 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1741 page_offset += iosize;
1746 if (tree->ops && tree->ops->readpage_io_hook) {
1747 ret = tree->ops->readpage_io_hook(page, cur,
1751 unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1753 ret = submit_extent_page(READ, tree, page,
1754 sector, iosize, page_offset,
1756 end_bio_extent_readpage);
1761 page_offset += iosize;
1765 if (!PageError(page))
1766 SetPageUptodate(page);
1772 int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1773 get_extent_t *get_extent)
1775 struct bio *bio = NULL;
1778 ret = __extent_read_full_page(tree, page, get_extent, &bio);
1780 submit_one_bio(READ, bio);
1783 EXPORT_SYMBOL(extent_read_full_page);
1786 * the writepage semantics are similar to regular writepage. extent
1787 * records are inserted to lock ranges in the tree, and as dirty areas
1788 * are found, they are marked writeback. Then the lock bits are removed
1789 * and the end_io handler clears the writeback ranges
1791 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1794 struct inode *inode = page->mapping->host;
1795 struct extent_page_data *epd = data;
1796 struct extent_map_tree *tree = epd->tree;
1797 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1799 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1803 u64 last_byte = i_size_read(inode);
1807 struct extent_map *em;
1808 struct block_device *bdev;
1811 size_t page_offset = 0;
1813 loff_t i_size = i_size_read(inode);
1814 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1818 WARN_ON(!PageLocked(page));
1819 if (page->index > end_index) {
1820 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1825 if (page->index == end_index) {
1828 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1830 userpage = kmap_atomic(page, KM_USER0);
1831 memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
1832 flush_dcache_page(page);
1833 kunmap_atomic(userpage, KM_USER0);
1836 set_page_extent_mapped(page);
1838 delalloc_start = start;
1840 while(delalloc_end < page_end) {
1841 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1844 if (nr_delalloc <= 0)
1846 tree->ops->fill_delalloc(inode, delalloc_start,
1848 clear_extent_bit(tree, delalloc_start,
1850 EXTENT_LOCKED | EXTENT_DELALLOC,
1852 delalloc_start = delalloc_end + 1;
1854 lock_extent(tree, start, page_end, GFP_NOFS);
1857 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1858 printk("found delalloc bits after lock_extent\n");
1861 if (last_byte <= start) {
1862 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1866 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1867 blocksize = inode->i_sb->s_blocksize;
1869 while (cur <= end) {
1870 if (cur >= last_byte) {
1871 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1874 em = epd->get_extent(inode, page, page_offset, cur, end, 1);
1875 if (IS_ERR(em) || !em) {
1880 extent_offset = cur - em->start;
1881 BUG_ON(em->end < cur);
1883 iosize = min(em->end - cur, end - cur) + 1;
1884 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1885 sector = (em->block_start + extent_offset) >> 9;
1887 block_start = em->block_start;
1888 free_extent_map(em);
1891 if (block_start == EXTENT_MAP_HOLE ||
1892 block_start == EXTENT_MAP_INLINE) {
1893 clear_extent_dirty(tree, cur,
1894 cur + iosize - 1, GFP_NOFS);
1896 page_offset += iosize;
1900 /* leave this out until we have a page_mkwrite call */
1901 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1904 page_offset += iosize;
1907 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1908 if (tree->ops && tree->ops->writepage_io_hook) {
1909 ret = tree->ops->writepage_io_hook(page, cur,
1917 unsigned long max_nr = end_index + 1;
1918 set_range_writeback(tree, cur, cur + iosize - 1);
1919 if (!PageWriteback(page)) {
1920 printk("warning page %lu not writeback, "
1921 "cur %llu end %llu\n", page->index,
1922 (unsigned long long)cur,
1923 (unsigned long long)end);
1926 ret = submit_extent_page(WRITE, tree, page, sector,
1927 iosize, page_offset, bdev,
1929 end_bio_extent_writepage);
1934 page_offset += iosize;
1939 /* make sure the mapping tag for page dirty gets cleared */
1940 set_page_writeback(page);
1941 end_page_writeback(page);
1943 unlock_extent(tree, start, page_end, GFP_NOFS);
1948 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1950 /* Taken directly from 2.6.23 for 2.6.18 back port */
1951 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
1955 * write_cache_pages - walk the list of dirty pages of the given address space
1956 * and write all of them.
1957 * @mapping: address space structure to write
1958 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1959 * @writepage: function called for each page
1960 * @data: data passed to writepage function
1962 * If a page is already under I/O, write_cache_pages() skips it, even
1963 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
1964 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
1965 * and msync() need to guarantee that all the data which was dirty at the time
1966 * the call was made get new I/O started against them. If wbc->sync_mode is
1967 * WB_SYNC_ALL then we were called for data integrity and we must wait for
1968 * existing IO to complete.
1970 static int write_cache_pages(struct address_space *mapping,
1971 struct writeback_control *wbc, writepage_t writepage,
1974 struct backing_dev_info *bdi = mapping->backing_dev_info;
1977 struct pagevec pvec;
1980 pgoff_t end; /* Inclusive */
1982 int range_whole = 0;
1984 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1985 wbc->encountered_congestion = 1;
1989 pagevec_init(&pvec, 0);
1990 if (wbc->range_cyclic) {
1991 index = mapping->writeback_index; /* Start from prev offset */
1994 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1995 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1996 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2001 while (!done && (index <= end) &&
2002 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2003 PAGECACHE_TAG_DIRTY,
2004 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2008 for (i = 0; i < nr_pages; i++) {
2009 struct page *page = pvec.pages[i];
2012 * At this point we hold neither mapping->tree_lock nor
2013 * lock on the page itself: the page may be truncated or
2014 * invalidated (changing page->mapping to NULL), or even
2015 * swizzled back from swapper_space to tmpfs file
2020 if (unlikely(page->mapping != mapping)) {
2025 if (!wbc->range_cyclic && page->index > end) {
2031 if (wbc->sync_mode != WB_SYNC_NONE)
2032 wait_on_page_writeback(page);
2034 if (PageWriteback(page) ||
2035 !clear_page_dirty_for_io(page)) {
2040 ret = (*writepage)(page, wbc, data);
2042 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2046 if (ret || (--(wbc->nr_to_write) <= 0))
2048 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2049 wbc->encountered_congestion = 1;
2053 pagevec_release(&pvec);
2056 if (!scanned && !done) {
2058 * We hit the last page and there is more work to be done: wrap
2059 * back to the start of the file
2065 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2066 mapping->writeback_index = index;
2071 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
2072 get_extent_t *get_extent,
2073 struct writeback_control *wbc)
2076 struct address_space *mapping = page->mapping;
2077 struct extent_page_data epd = {
2080 .get_extent = get_extent,
2082 struct writeback_control wbc_writepages = {
2084 .sync_mode = WB_SYNC_NONE,
2085 .older_than_this = NULL,
2087 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2088 .range_end = (loff_t)-1,
2092 ret = __extent_writepage(page, wbc, &epd);
2094 write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
2096 submit_one_bio(WRITE, epd.bio);
2100 EXPORT_SYMBOL(extent_write_full_page);
2103 int extent_writepages(struct extent_map_tree *tree,
2104 struct address_space *mapping,
2105 get_extent_t *get_extent,
2106 struct writeback_control *wbc)
2109 struct extent_page_data epd = {
2112 .get_extent = get_extent,
2115 ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
2117 submit_one_bio(WRITE, epd.bio);
2121 EXPORT_SYMBOL(extent_writepages);
2123 int extent_readpages(struct extent_map_tree *tree,
2124 struct address_space *mapping,
2125 struct list_head *pages, unsigned nr_pages,
2126 get_extent_t get_extent)
2128 struct bio *bio = NULL;
2130 struct pagevec pvec;
2132 pagevec_init(&pvec, 0);
2133 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2134 struct page *page = list_entry(pages->prev, struct page, lru);
2136 prefetchw(&page->flags);
2137 list_del(&page->lru);
2139 * what we want to do here is call add_to_page_cache_lru,
2140 * but that isn't exported, so we reproduce it here
2142 if (!add_to_page_cache(page, mapping,
2143 page->index, GFP_KERNEL)) {
2145 /* open coding of lru_cache_add, also not exported */
2146 page_cache_get(page);
2147 if (!pagevec_add(&pvec, page))
2148 __pagevec_lru_add(&pvec);
2149 __extent_read_full_page(tree, page, get_extent, &bio);
2151 page_cache_release(page);
2153 if (pagevec_count(&pvec))
2154 __pagevec_lru_add(&pvec);
2155 BUG_ON(!list_empty(pages));
2157 submit_one_bio(READ, bio);
2160 EXPORT_SYMBOL(extent_readpages);
2163 * basic invalidatepage code, this waits on any locked or writeback
2164 * ranges corresponding to the page, and then deletes any extent state
2165 * records from the tree
2167 int extent_invalidatepage(struct extent_map_tree *tree,
2168 struct page *page, unsigned long offset)
2170 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2171 u64 end = start + PAGE_CACHE_SIZE - 1;
2172 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2174 start += (offset + blocksize - 1) & ~(blocksize - 1);
2178 lock_extent(tree, start, end, GFP_NOFS);
2179 wait_on_extent_writeback(tree, start, end);
2180 clear_extent_bit(tree, start, end,
2181 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2185 EXPORT_SYMBOL(extent_invalidatepage);
2188 * simple commit_write call: the page is marked dirty and i_size is
2189 * updated if the write extended past the old end of file
2191 int extent_commit_write(struct extent_map_tree *tree,
2192 struct inode *inode, struct page *page,
2193 unsigned from, unsigned to)
2195 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2197 set_page_extent_mapped(page);
2198 set_page_dirty(page);
2200 if (pos > inode->i_size) {
2201 i_size_write(inode, pos);
2202 mark_inode_dirty(inode);
2206 EXPORT_SYMBOL(extent_commit_write);
2208 int extent_prepare_write(struct extent_map_tree *tree,
2209 struct inode *inode, struct page *page,
2210 unsigned from, unsigned to, get_extent_t *get_extent)
2212 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2213 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2215 u64 orig_block_start;
2218 struct extent_map *em;
2219 unsigned blocksize = 1 << inode->i_blkbits;
2220 size_t page_offset = 0;
2221 size_t block_off_start;
2222 size_t block_off_end;
2228 set_page_extent_mapped(page);
2230 block_start = (page_start + from) & ~((u64)blocksize - 1);
2231 block_end = (page_start + to - 1) | (blocksize - 1);
2232 orig_block_start = block_start;
2234 lock_extent(tree, page_start, page_end, GFP_NOFS);
2235 while(block_start <= block_end) {
2236 em = get_extent(inode, page, page_offset, block_start,
2238 if (IS_ERR(em) || !em) {
2241 cur_end = min(block_end, em->end);
2242 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2243 block_off_end = block_off_start + blocksize;
2244 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2246 if (!PageUptodate(page) && isnew &&
2247 (block_off_end > to || block_off_start < from)) {
2250 kaddr = kmap_atomic(page, KM_USER0);
2251 if (block_off_end > to)
2252 memset(kaddr + to, 0, block_off_end - to);
2253 if (block_off_start < from)
2254 memset(kaddr + block_off_start, 0,
2255 from - block_off_start);
2256 flush_dcache_page(page);
2257 kunmap_atomic(kaddr, KM_USER0);
2259 if ((em->block_start != EXTENT_MAP_HOLE &&
2260 em->block_start != EXTENT_MAP_INLINE) &&
2261 !isnew && !PageUptodate(page) &&
2262 (block_off_end > to || block_off_start < from) &&
2263 !test_range_bit(tree, block_start, cur_end,
2264 EXTENT_UPTODATE, 1)) {
2266 u64 extent_offset = block_start - em->start;
2268 sector = (em->block_start + extent_offset) >> 9;
2269 iosize = (cur_end - block_start + blocksize - 1) &
2270 ~((u64)blocksize - 1);
2272 * we've already got the extent locked, but we
2273 * need to split the state such that our end_bio
2274 * handler can clear the lock.
2276 set_extent_bit(tree, block_start,
2277 block_start + iosize - 1,
2278 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2279 ret = submit_extent_page(READ, tree, page,
2280 sector, iosize, page_offset, em->bdev,
2282 end_bio_extent_preparewrite);
2284 block_start = block_start + iosize;
2286 set_extent_uptodate(tree, block_start, cur_end,
2288 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2289 block_start = cur_end + 1;
2291 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2292 free_extent_map(em);
2295 wait_extent_bit(tree, orig_block_start,
2296 block_end, EXTENT_LOCKED);
2298 check_page_uptodate(tree, page);
2300 /* FIXME, zero out newly allocated blocks on error */
2303 EXPORT_SYMBOL(extent_prepare_write);
2306 * a helper for releasepage. As long as there are no locked extents
2307 * in the range corresponding to the page, both state records and extent
2308 * map records are removed
2310 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
2312 struct extent_map *em;
2313 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2314 u64 end = start + PAGE_CACHE_SIZE - 1;
2315 u64 orig_start = start;
2318 while (start <= end) {
2319 em = lookup_extent_mapping(tree, start, end);
2320 if (!em || IS_ERR(em))
2322 if (!test_range_bit(tree, em->start, em->end,
2323 EXTENT_LOCKED, 0)) {
2324 remove_extent_mapping(tree, em);
2325 /* once for the rb tree */
2326 free_extent_map(em);
2328 start = em->end + 1;
2330 free_extent_map(em);
2332 if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
2335 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2339 EXPORT_SYMBOL(try_release_extent_mapping);
2341 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2342 get_extent_t *get_extent)
2344 struct inode *inode = mapping->host;
2345 u64 start = (u64)iblock << inode->i_blkbits;
2346 u64 end = start + (1 << inode->i_blkbits) - 1;
2347 sector_t sector = 0;
2348 struct extent_map *em;
2350 em = get_extent(inode, NULL, 0, start, end, 0);
2351 if (!em || IS_ERR(em))
2354 if (em->block_start == EXTENT_MAP_INLINE ||
2355 em->block_start == EXTENT_MAP_HOLE)
2358 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2360 free_extent_map(em);
2364 static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
2366 if (list_empty(&eb->lru)) {
2367 extent_buffer_get(eb);
2368 list_add(&eb->lru, &tree->buffer_lru);
2370 if (tree->lru_size >= BUFFER_LRU_MAX) {
2371 struct extent_buffer *rm;
2372 rm = list_entry(tree->buffer_lru.prev,
2373 struct extent_buffer, lru);
2375 list_del_init(&rm->lru);
2376 free_extent_buffer(rm);
2379 list_move(&eb->lru, &tree->buffer_lru);
2382 static struct extent_buffer *find_lru(struct extent_map_tree *tree,
2383 u64 start, unsigned long len)
2385 struct list_head *lru = &tree->buffer_lru;
2386 struct list_head *cur = lru->next;
2387 struct extent_buffer *eb;
2389 if (list_empty(lru))
2393 eb = list_entry(cur, struct extent_buffer, lru);
2394 if (eb->start == start && eb->len == len) {
2395 extent_buffer_get(eb);
2399 } while (cur != lru);
2403 static inline unsigned long num_extent_pages(u64 start, u64 len)
2405 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2406 (start >> PAGE_CACHE_SHIFT);
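/*
 * Worked example, not part of the original file, assuming 4K pages:
 * a buffer of len 8192 that starts at byte offset 4096 gives
 * ((4096 + 8192 + 4095) >> 12) - (4096 >> 12) == 3 - 1 == 2 pages.
 */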
2409 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2413 struct address_space *mapping;
2416 return eb->first_page;
2417 i += eb->start >> PAGE_CACHE_SHIFT;
2418 mapping = eb->first_page->mapping;
2419 read_lock_irq(&mapping->tree_lock);
2420 p = radix_tree_lookup(&mapping->page_tree, i);
2421 read_unlock_irq(&mapping->tree_lock);
2425 static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2430 struct extent_buffer *eb = NULL;
2432 spin_lock(&tree->lru_lock);
2433 eb = find_lru(tree, start, len);
2434 spin_unlock(&tree->lru_lock);
2439 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2440 INIT_LIST_HEAD(&eb->lru);
2443 atomic_set(&eb->refs, 1);
2448 static void __free_extent_buffer(struct extent_buffer *eb)
2450 kmem_cache_free(extent_buffer_cache, eb);
2453 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2454 u64 start, unsigned long len,
2458 unsigned long num_pages = num_extent_pages(start, len);
2460 unsigned long index = start >> PAGE_CACHE_SHIFT;
2461 struct extent_buffer *eb;
2463 struct address_space *mapping = tree->mapping;
2466 eb = __alloc_extent_buffer(tree, start, len, mask);
2467 if (!eb || IS_ERR(eb))
2470 if (eb->flags & EXTENT_BUFFER_FILLED)
2474 eb->first_page = page0;
2477 page_cache_get(page0);
2478 mark_page_accessed(page0);
2479 set_page_extent_mapped(page0);
2480 WARN_ON(!PageUptodate(page0));
2481 set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2486 for (; i < num_pages; i++, index++) {
2487 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2492 set_page_extent_mapped(p);
2493 mark_page_accessed(p);
2496 set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2499 set_page_private(p, EXTENT_PAGE_PRIVATE);
2501 if (!PageUptodate(p))
2506 eb->flags |= EXTENT_UPTODATE;
2507 eb->flags |= EXTENT_BUFFER_FILLED;
2510 spin_lock(&tree->lru_lock);
2512 spin_unlock(&tree->lru_lock);
2516 spin_lock(&tree->lru_lock);
2517 list_del_init(&eb->lru);
2518 spin_unlock(&tree->lru_lock);
2519 if (!atomic_dec_and_test(&eb->refs))
2521 for (index = 1; index < i; index++) {
2522 page_cache_release(extent_buffer_page(eb, index));
2525 page_cache_release(extent_buffer_page(eb, 0));
2526 __free_extent_buffer(eb);
2529 EXPORT_SYMBOL(alloc_extent_buffer);
2531 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2532 u64 start, unsigned long len,
2535 unsigned long num_pages = num_extent_pages(start, len);
2537 unsigned long index = start >> PAGE_CACHE_SHIFT;
2538 struct extent_buffer *eb;
2540 struct address_space *mapping = tree->mapping;
2543 eb = __alloc_extent_buffer(tree, start, len, mask);
2544 if (!eb || IS_ERR(eb))
2547 if (eb->flags & EXTENT_BUFFER_FILLED)
2550 for (i = 0; i < num_pages; i++, index++) {
2551 p = find_lock_page(mapping, index);
2555 set_page_extent_mapped(p);
2556 mark_page_accessed(p);
2560 set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2563 set_page_private(p, EXTENT_PAGE_PRIVATE);
2566 if (!PageUptodate(p))
2571 eb->flags |= EXTENT_UPTODATE;
2572 eb->flags |= EXTENT_BUFFER_FILLED;
2575 spin_lock(&tree->lru_lock);
2577 spin_unlock(&tree->lru_lock);
2580 spin_lock(&tree->lru_lock);
2581 list_del_init(&eb->lru);
2582 spin_unlock(&tree->lru_lock);
2583 if (!atomic_dec_and_test(&eb->refs))
2585 for (index = 1; index < i; index++) {
2586 page_cache_release(extent_buffer_page(eb, index));
2589 page_cache_release(extent_buffer_page(eb, 0));
2590 __free_extent_buffer(eb);
2593 EXPORT_SYMBOL(find_extent_buffer);
2595 void free_extent_buffer(struct extent_buffer *eb)
2598 unsigned long num_pages;
2603 if (!atomic_dec_and_test(&eb->refs))
2606 WARN_ON(!list_empty(&eb->lru));
2607 num_pages = num_extent_pages(eb->start, eb->len);
2609 for (i = 1; i < num_pages; i++) {
2610 page_cache_release(extent_buffer_page(eb, i));
2612 page_cache_release(extent_buffer_page(eb, 0));
2613 __free_extent_buffer(eb);
2615 EXPORT_SYMBOL(free_extent_buffer);
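/*
 * clear_extent_buffer_dirty: clear the dirty bits for the buffer's byte
 * range in the extent tree and clean the underlying pages.  The first and
 * last page of an unaligned buffer are re-checked with test_range_bit()
 * over the whole page so a page that a neighbouring buffer still has dirty
 * is not cleaned, and the radix tree dirty tag is cleared by hand under the
 * mapping's tree_lock once clear_page_dirty_for_io() has run.
 */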
2617 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2618 struct extent_buffer *eb)
2622 unsigned long num_pages;
2625 u64 start = eb->start;
2626 u64 end = start + eb->len - 1;
2628 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2629 num_pages = num_extent_pages(eb->start, eb->len);
2631 for (i = 0; i < num_pages; i++) {
2632 page = extent_buffer_page(eb, i);
2635 * if we're on the last page or the first page and the
2636 * block isn't aligned on a page boundary, do extra checks
2637 * to make sure we don't clean a page that is partially dirty
2639 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2640 ((i == num_pages - 1) &&
2641 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2642 start = (u64)page->index << PAGE_CACHE_SHIFT;
2643 end = start + PAGE_CACHE_SIZE - 1;
2644 if (test_range_bit(tree, start, end,
2650 clear_page_dirty_for_io(page);
2651 write_lock_irq(&page->mapping->tree_lock);
2652 if (!PageDirty(page)) {
2653 radix_tree_tag_clear(&page->mapping->page_tree,
2655 PAGECACHE_TAG_DIRTY);
2657 write_unlock_irq(&page->mapping->tree_lock);
2662 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2664 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2665 struct extent_buffer *eb)
2667 return wait_on_extent_writeback(tree, eb->start,
2668 eb->start + eb->len - 1);
2670 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
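/*
 * set_extent_buffer_dirty: mark every page of the buffer dirty and then set
 * the dirty bits on the byte range in the extent tree.  page->private is
 * re-established on the first page beforehand, because releasepage may have
 * dropped it while the page was clean and writepage relies on it.
 */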
2672 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2673 struct extent_buffer *eb)
2676 unsigned long num_pages;
2678 num_pages = num_extent_pages(eb->start, eb->len);
2679 for (i = 0; i < num_pages; i++) {
2680 struct page *page = extent_buffer_page(eb, i);
2681 /* writepage may need to do something special for the
2682 * first page, so we have to make sure page->private is
2683 * properly set. releasepage may drop page->private
2684 * on us if the page isn't already dirty.
2688 set_page_private(page,
2689 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2692 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2696 return set_extent_dirty(tree, eb->start,
2697 eb->start + eb->len - 1, GFP_NOFS);
2699 EXPORT_SYMBOL(set_extent_buffer_dirty);
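/*
 * set_extent_buffer_uptodate: record EXTENT_UPTODATE for the buffer's byte
 * range and propagate it to the pages.  Fully covered pages are flagged
 * uptodate directly; the first and last page of an unaligned buffer go
 * through check_page_uptodate() so they are only flagged once the whole
 * page really is uptodate.
 */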
2701 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2702 struct extent_buffer *eb)
2706 unsigned long num_pages;
2708 num_pages = num_extent_pages(eb->start, eb->len);
2710 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2712 for (i = 0; i < num_pages; i++) {
2713 page = extent_buffer_page(eb, i);
2714 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2715 ((i == num_pages - 1) &&
2716 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2717 check_page_uptodate(tree, page);
2720 SetPageUptodate(page);
2724 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2726 int extent_buffer_uptodate(struct extent_map_tree *tree,
2727 struct extent_buffer *eb)
2729 if (eb->flags & EXTENT_UPTODATE)
2731 return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2732 EXTENT_UPTODATE, 1);
2734 EXPORT_SYMBOL(extent_buffer_uptodate);
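/*
 * read_extent_buffer_pages: make sure the buffer's pages are uptodate,
 * starting readpage against the tree's mapping for any page that is not,
 * then waiting on the locked pages and checking the result before
 * EXTENT_UPTODATE is set on the buffer.  start lets the caller begin the
 * check part way into the buffer.
 */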
2736 int read_extent_buffer_pages(struct extent_map_tree *tree,
2737 struct extent_buffer *eb,
2742 unsigned long start_i;
2746 unsigned long num_pages;
2748 if (eb->flags & EXTENT_UPTODATE)
2751 if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2752 EXTENT_UPTODATE, 1)) {
2757 WARN_ON(start < eb->start);
2758 start_i = (start >> PAGE_CACHE_SHIFT) -
2759 (eb->start >> PAGE_CACHE_SHIFT);
2764 num_pages = num_extent_pages(eb->start, eb->len);
2765 for (i = start_i; i < num_pages; i++) {
2766 page = extent_buffer_page(eb, i);
2767 if (PageUptodate(page)) {
2771 if (TestSetPageLocked(page)) {
2777 if (!PageUptodate(page)) {
2778 err = page->mapping->a_ops->readpage(NULL, page);
2791 for (i = start_i; i < num_pages; i++) {
2792 page = extent_buffer_page(eb, i);
2793 wait_on_page_locked(page);
2794 if (!PageUptodate(page)) {
2799 eb->flags |= EXTENT_UPTODATE;
2802 EXPORT_SYMBOL(read_extent_buffer_pages);
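/*
 * read_extent_buffer: copy bytes out of the buffer into a kernel
 * destination, one kmap_atomic()'d page at a time.  start is an offset
 * inside the buffer; start_offset compensates for buffers that do not begin
 * on a page boundary.  (The trailing length parameter is truncated in the
 * signature above; from the body it is assumed to be "unsigned long len".)
 */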
2804 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2805 unsigned long start,
2812 char *dst = (char *)dstv;
2813 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2814 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2815 unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2817 WARN_ON(start > eb->len);
2818 WARN_ON(start + len > eb->len);
2820 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2823 page = extent_buffer_page(eb, i);
2824 if (!PageUptodate(page)) {
2825 printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2828 WARN_ON(!PageUptodate(page));
2830 cur = min(len, (PAGE_CACHE_SIZE - offset));
2831 kaddr = kmap_atomic(page, KM_USER1);
2832 memcpy(dst, kaddr + offset, cur);
2833 kunmap_atomic(kaddr, KM_USER1);
2841 EXPORT_SYMBOL(read_extent_buffer);
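/*
 * map_private_extent_buffer: kmap_atomic() the single page that holds
 * [start, start + min_len) and hand back a pointer to it; map_start and
 * map_len describe how much of the buffer the mapping actually covers.
 * A range that runs past the buffer is rejected with a printk, and judging
 * by the end_i computation a range spanning a page boundary is refused as
 * well, so callers must be prepared to fall back to the copying helpers.
 */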
2843 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2844 unsigned long min_len, char **token, char **map,
2845 unsigned long *map_start,
2846 unsigned long *map_len, int km)
2848 size_t offset = start & (PAGE_CACHE_SIZE - 1);
2851 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2852 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2853 unsigned long end_i = (start_offset + start + min_len - 1) >>
2860 offset = start_offset;
2864 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
2866 if (start + min_len > eb->len) {
2867 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
2871 p = extent_buffer_page(eb, i);
2872 WARN_ON(!PageUptodate(p));
2873 kaddr = kmap_atomic(p, km);
2875 *map = kaddr + offset;
2876 *map_len = PAGE_CACHE_SIZE - offset;
2879 EXPORT_SYMBOL(map_private_extent_buffer);
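/*
 * map_extent_buffer: wrapper around map_private_extent_buffer() that also
 * tracks the mapping in the buffer itself: a previously recorded
 * eb->map_token is unmapped first, and on success the new token, start and
 * length are stored back into the extent_buffer.
 *
 * A typical caller might look like this (illustrative sketch only, not
 * taken from this file; "offset" and "val" are made-up names):
 *
 *	char *token, *kaddr;
 *	unsigned long map_start, map_len;
 *	u64 val;
 *
 *	if (!map_extent_buffer(eb, offset, sizeof(val), &token, &kaddr,
 *			       &map_start, &map_len, KM_USER0)) {
 *		memcpy(&val, kaddr, sizeof(val));
 *		unmap_extent_buffer(eb, token, KM_USER0);
 *	}
 */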
2881 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2882 unsigned long min_len,
2883 char **token, char **map,
2884 unsigned long *map_start,
2885 unsigned long *map_len, int km)
2889 if (eb->map_token) {
2890 unmap_extent_buffer(eb, eb->map_token, km);
2891 eb->map_token = NULL;
2894 err = map_private_extent_buffer(eb, start, min_len, token, map,
2895 map_start, map_len, km);
2897 eb->map_token = *token;
2899 eb->map_start = *map_start;
2900 eb->map_len = *map_len;
2904 EXPORT_SYMBOL(map_extent_buffer);
2906 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2908 kunmap_atomic(token, km);
2910 EXPORT_SYMBOL(unmap_extent_buffer);
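/*
 * memcmp_extent_buffer: compare len bytes at offset start inside the buffer
 * against a kernel buffer, page by page, returning the memcmp() result
 * (0 when the ranges match).
 */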
2912 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2913 unsigned long start,
2920 char *ptr = (char *)ptrv;
2921 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2922 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2925 WARN_ON(start > eb->len);
2926 WARN_ON(start + len > eb->len);
2928 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2931 page = extent_buffer_page(eb, i);
2932 WARN_ON(!PageUptodate(page));
2934 cur = min(len, (PAGE_CACHE_SIZE - offset));
2936 kaddr = kmap_atomic(page, KM_USER0);
2937 ret = memcmp(ptr, kaddr + offset, cur);
2938 kunmap_atomic(kaddr, KM_USER0);
2949 EXPORT_SYMBOL(memcmp_extent_buffer);
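/*
 * write_extent_buffer: copy len bytes from a kernel source into the buffer
 * at offset start, mapping each backing page in turn with kmap_atomic().
 * The pages are expected to be uptodate already (WARN_ON otherwise);
 * dirtying them is left to the caller.
 */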
2951 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2952 unsigned long start, unsigned long len)
2958 char *src = (char *)srcv;
2959 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2960 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2962 WARN_ON(start > eb->len);
2963 WARN_ON(start + len > eb->len);
2965 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2968 page = extent_buffer_page(eb, i);
2969 WARN_ON(!PageUptodate(page));
2971 cur = min(len, PAGE_CACHE_SIZE - offset);
2972 kaddr = kmap_atomic(page, KM_USER1);
2973 memcpy(kaddr + offset, src, cur);
2974 kunmap_atomic(kaddr, KM_USER1);
2982 EXPORT_SYMBOL(write_extent_buffer);
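/*
 * memset_extent_buffer: fill len bytes of the buffer, starting at offset
 * start, with the byte c, one mapped page at a time.
 */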
2984 void memset_extent_buffer(struct extent_buffer *eb, char c,
2985 unsigned long start, unsigned long len)
2991 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2992 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2994 WARN_ON(start > eb->len);
2995 WARN_ON(start + len > eb->len);
2997 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3000 page = extent_buffer_page(eb, i);
3001 WARN_ON(!PageUptodate(page));
3003 cur = min(len, PAGE_CACHE_SIZE - offset);
3004 kaddr = kmap_atomic(page, KM_USER0);
3005 memset(kaddr + offset, c, cur);
3006 kunmap_atomic(kaddr, KM_USER0);
3013 EXPORT_SYMBOL(memset_extent_buffer);
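/*
 * copy_extent_buffer: copy bytes from one extent_buffer into another.  The
 * destination is walked page by page and read_extent_buffer() pulls the
 * corresponding bytes out of the source, so the two buffers may have
 * different page alignment; they are expected to have the same length
 * (WARN_ON otherwise).
 */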
3015 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3016 unsigned long dst_offset, unsigned long src_offset,
3019 u64 dst_len = dst->len;
3024 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3025 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3027 WARN_ON(src->len != dst_len);
3029 offset = (start_offset + dst_offset) &
3030 ((unsigned long)PAGE_CACHE_SIZE - 1);
3033 page = extent_buffer_page(dst, i);
3034 WARN_ON(!PageUptodate(page));
3036 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3038 kaddr = kmap_atomic(page, KM_USER0);
3039 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3040 kunmap_atomic(kaddr, KM_USER0);
3048 EXPORT_SYMBOL(copy_extent_buffer);
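/*
 * move_pages: memmove-style helper for a single destination/source page
 * pair.  When both offsets live in the same page a plain memmove() is used;
 * otherwise p and s are set up just past the end of each range, for what is
 * presumably a back-to-front copy so overlapping ranges stay safe.
 */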
3050 static void move_pages(struct page *dst_page, struct page *src_page,
3051 unsigned long dst_off, unsigned long src_off,
3054 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3055 if (dst_page == src_page) {
3056 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3058 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3059 char *p = dst_kaddr + dst_off + len;
3060 char *s = src_kaddr + src_off + len;
3065 kunmap_atomic(src_kaddr, KM_USER1);
3067 kunmap_atomic(dst_kaddr, KM_USER0);
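/*
 * copy_pages: plain forward memcpy() between two page/offset pairs, mapping
 * the source page only when it differs from the destination.
 */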
3070 static void copy_pages(struct page *dst_page, struct page *src_page,
3071 unsigned long dst_off, unsigned long src_off,
3074 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3077 if (dst_page != src_page)
3078 src_kaddr = kmap_atomic(src_page, KM_USER1);
3080 src_kaddr = dst_kaddr;
3082 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3083 kunmap_atomic(dst_kaddr, KM_USER0);
3084 if (dst_page != src_page)
3085 kunmap_atomic(src_kaddr, KM_USER1);
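/*
 * memcpy_extent_buffer: copy len bytes inside one extent_buffer, from
 * src_offset to dst_offset, chopping the copy at page boundaries and
 * handing each piece to copy_pages().  Ranges that run past the buffer are
 * rejected with a printk.
 */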
3088 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3089 unsigned long src_offset, unsigned long len)
3092 size_t dst_off_in_page;
3093 size_t src_off_in_page;
3094 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3095 unsigned long dst_i;
3096 unsigned long src_i;
3098 if (src_offset + len > dst->len) {
3099 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3100 src_offset, len, dst->len);
3103 if (dst_offset + len > dst->len) {
3104 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3105 dst_offset, len, dst->len);
3110 dst_off_in_page = (start_offset + dst_offset) &
3111 ((unsigned long)PAGE_CACHE_SIZE - 1);
3112 src_off_in_page = (start_offset + src_offset) &
3113 ((unsigned long)PAGE_CACHE_SIZE - 1);
3115 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3116 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3118 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - src_off_in_page));
3120 cur = min_t(unsigned long, cur,
3121 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3123 copy_pages(extent_buffer_page(dst, dst_i),
3124 extent_buffer_page(dst, src_i),
3125 dst_off_in_page, src_off_in_page, cur);
3132 EXPORT_SYMBOL(memcpy_extent_buffer);
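/*
 * memmove_extent_buffer: overlap-safe variant of memcpy_extent_buffer().
 * When the destination sits below the source a forward
 * memcpy_extent_buffer() is safe and used directly; otherwise the copy is
 * done from the end of the ranges backwards, one move_pages() chunk at a
 * time.
 */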
3134 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3135 unsigned long src_offset, unsigned long len)
3138 size_t dst_off_in_page;
3139 size_t src_off_in_page;
3140 unsigned long dst_end = dst_offset + len - 1;
3141 unsigned long src_end = src_offset + len - 1;
3142 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3143 unsigned long dst_i;
3144 unsigned long src_i;
3146 if (src_offset + len > dst->len) {
3147 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3148 src_offset, len, dst->len);
3151 if (dst_offset + len > dst->len) {
3152 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3153 dst_offset, len, dst->len);
3156 if (dst_offset < src_offset) {
3157 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3161 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3162 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3164 dst_off_in_page = (start_offset + dst_end) &
3165 ((unsigned long)PAGE_CACHE_SIZE - 1);
3166 src_off_in_page = (start_offset + src_end) &
3167 ((unsigned long)PAGE_CACHE_SIZE - 1);
3169 cur = min_t(unsigned long, len, src_off_in_page + 1);
3170 cur = min(cur, dst_off_in_page + 1);
3171 move_pages(extent_buffer_page(dst, dst_i),
3172 extent_buffer_page(dst, src_i),
3173 dst_off_in_page - cur + 1,
3174 src_off_in_page - cur + 1, cur);
3181 EXPORT_SYMBOL(memmove_extent_buffer);