]> git.karo-electronics.de Git - karo-tx-linux.git/blob - fs/btrfs/extent_io.c
Btrfs: Add zlib compression support
[karo-tx-linux.git] / fs / btrfs / extent_io.c
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/gfp.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
17 #include "compat.h"
18 #include "ctree.h"
19 #include "btrfs_inode.h"
20
21 /* temporary define until extent_map moves out of btrfs */
22 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
23                                        unsigned long extra_flags,
24                                        void (*ctor)(void *, struct kmem_cache *,
25                                                     unsigned long));
26
27 static struct kmem_cache *extent_state_cache;
28 static struct kmem_cache *extent_buffer_cache;
29
30 static LIST_HEAD(buffers);
31 static LIST_HEAD(states);
32
33 #define LEAK_DEBUG 1
34 #ifdef LEAK_DEBUG
35 static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED;
36 #endif
37
38 #define BUFFER_LRU_MAX 64
39
40 struct tree_entry {
41         u64 start;
42         u64 end;
43         struct rb_node rb_node;
44 };
45
46 struct extent_page_data {
47         struct bio *bio;
48         struct extent_io_tree *tree;
49         get_extent_t *get_extent;
50 };
51
52 int __init extent_io_init(void)
53 {
54         extent_state_cache = btrfs_cache_create("extent_state",
55                                             sizeof(struct extent_state), 0,
56                                             NULL);
57         if (!extent_state_cache)
58                 return -ENOMEM;
59
60         extent_buffer_cache = btrfs_cache_create("extent_buffers",
61                                             sizeof(struct extent_buffer), 0,
62                                             NULL);
63         if (!extent_buffer_cache)
64                 goto free_state_cache;
65         return 0;
66
67 free_state_cache:
68         kmem_cache_destroy(extent_state_cache);
69         return -ENOMEM;
70 }
71
72 void extent_io_exit(void)
73 {
74         struct extent_state *state;
75         struct extent_buffer *eb;
76
77         while (!list_empty(&states)) {
78                 state = list_entry(states.next, struct extent_state, leak_list);
79                 printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
80                 list_del(&state->leak_list);
81                 kmem_cache_free(extent_state_cache, state);
82
83         }
84
85         while (!list_empty(&buffers)) {
86                 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
87                 printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs));
88                 list_del(&eb->leak_list);
89                 kmem_cache_free(extent_buffer_cache, eb);
90         }
91         if (extent_state_cache)
92                 kmem_cache_destroy(extent_state_cache);
93         if (extent_buffer_cache)
94                 kmem_cache_destroy(extent_buffer_cache);
95 }
96
97 void extent_io_tree_init(struct extent_io_tree *tree,
98                           struct address_space *mapping, gfp_t mask)
99 {
100         tree->state.rb_node = NULL;
101         tree->buffer.rb_node = NULL;
102         tree->ops = NULL;
103         tree->dirty_bytes = 0;
104         spin_lock_init(&tree->lock);
105         spin_lock_init(&tree->buffer_lock);
106         tree->mapping = mapping;
107 }
108 EXPORT_SYMBOL(extent_io_tree_init);
109
110 struct extent_state *alloc_extent_state(gfp_t mask)
111 {
112         struct extent_state *state;
113 #ifdef LEAK_DEBUG
114         unsigned long flags;
115 #endif
116
117         state = kmem_cache_alloc(extent_state_cache, mask);
118         if (!state)
119                 return state;
120         state->state = 0;
121         state->private = 0;
122         state->tree = NULL;
123 #ifdef LEAK_DEBUG
124         spin_lock_irqsave(&leak_lock, flags);
125         list_add(&state->leak_list, &states);
126         spin_unlock_irqrestore(&leak_lock, flags);
127 #endif
128         atomic_set(&state->refs, 1);
129         init_waitqueue_head(&state->wq);
130         return state;
131 }
132 EXPORT_SYMBOL(alloc_extent_state);
133
134 void free_extent_state(struct extent_state *state)
135 {
136         if (!state)
137                 return;
138         if (atomic_dec_and_test(&state->refs)) {
139 #ifdef LEAK_DEBUG
140                 unsigned long flags;
141 #endif
142                 WARN_ON(state->tree);
143 #ifdef LEAK_DEBUG
144                 spin_lock_irqsave(&leak_lock, flags);
145                 list_del(&state->leak_list);
146                 spin_unlock_irqrestore(&leak_lock, flags);
147 #endif
148                 kmem_cache_free(extent_state_cache, state);
149         }
150 }
151 EXPORT_SYMBOL(free_extent_state);
152
153 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
154                                    struct rb_node *node)
155 {
156         struct rb_node ** p = &root->rb_node;
157         struct rb_node * parent = NULL;
158         struct tree_entry *entry;
159
160         while(*p) {
161                 parent = *p;
162                 entry = rb_entry(parent, struct tree_entry, rb_node);
163
164                 if (offset < entry->start)
165                         p = &(*p)->rb_left;
166                 else if (offset > entry->end)
167                         p = &(*p)->rb_right;
168                 else
169                         return parent;
170         }
171
172         entry = rb_entry(node, struct tree_entry, rb_node);
173         rb_link_node(node, parent, p);
174         rb_insert_color(node, root);
175         return NULL;
176 }
177
178 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
179                                      struct rb_node **prev_ret,
180                                      struct rb_node **next_ret)
181 {
182         struct rb_root *root = &tree->state;
183         struct rb_node * n = root->rb_node;
184         struct rb_node *prev = NULL;
185         struct rb_node *orig_prev = NULL;
186         struct tree_entry *entry;
187         struct tree_entry *prev_entry = NULL;
188
189         while(n) {
190                 entry = rb_entry(n, struct tree_entry, rb_node);
191                 prev = n;
192                 prev_entry = entry;
193
194                 if (offset < entry->start)
195                         n = n->rb_left;
196                 else if (offset > entry->end)
197                         n = n->rb_right;
198                 else {
199                         return n;
200                 }
201         }
202
203         if (prev_ret) {
204                 orig_prev = prev;
205                 while(prev && offset > prev_entry->end) {
206                         prev = rb_next(prev);
207                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
208                 }
209                 *prev_ret = prev;
210                 prev = orig_prev;
211         }
212
213         if (next_ret) {
214                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
215                 while(prev && offset < prev_entry->start) {
216                         prev = rb_prev(prev);
217                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
218                 }
219                 *next_ret = prev;
220         }
221         return NULL;
222 }
223
224 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
225                                           u64 offset)
226 {
227         struct rb_node *prev = NULL;
228         struct rb_node *ret;
229
230         ret = __etree_search(tree, offset, &prev, NULL);
231         if (!ret) {
232                 return prev;
233         }
234         return ret;
235 }
236
237 static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
238                                           u64 offset, struct rb_node *node)
239 {
240         struct rb_root *root = &tree->buffer;
241         struct rb_node ** p = &root->rb_node;
242         struct rb_node * parent = NULL;
243         struct extent_buffer *eb;
244
245         while(*p) {
246                 parent = *p;
247                 eb = rb_entry(parent, struct extent_buffer, rb_node);
248
249                 if (offset < eb->start)
250                         p = &(*p)->rb_left;
251                 else if (offset > eb->start)
252                         p = &(*p)->rb_right;
253                 else
254                         return eb;
255         }
256
257         rb_link_node(node, parent, p);
258         rb_insert_color(node, root);
259         return NULL;
260 }
261
262 static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
263                                            u64 offset)
264 {
265         struct rb_root *root = &tree->buffer;
266         struct rb_node * n = root->rb_node;
267         struct extent_buffer *eb;
268
269         while(n) {
270                 eb = rb_entry(n, struct extent_buffer, rb_node);
271                 if (offset < eb->start)
272                         n = n->rb_left;
273                 else if (offset > eb->start)
274                         n = n->rb_right;
275                 else
276                         return eb;
277         }
278         return NULL;
279 }
280
281 /*
282  * utility function to look for merge candidates inside a given range.
283  * Any extents with matching state are merged together into a single
284  * extent in the tree.  Extents with EXTENT_IO in their state field
285  * are not merged because the end_io handlers need to be able to do
286  * operations on them without sleeping (or doing allocations/splits).
287  *
288  * This should be called with the tree lock held.
289  */
290 static int merge_state(struct extent_io_tree *tree,
291                        struct extent_state *state)
292 {
293         struct extent_state *other;
294         struct rb_node *other_node;
295
296         if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
297                 return 0;
298
299         other_node = rb_prev(&state->rb_node);
300         if (other_node) {
301                 other = rb_entry(other_node, struct extent_state, rb_node);
302                 if (other->end == state->start - 1 &&
303                     other->state == state->state) {
304                         state->start = other->start;
305                         other->tree = NULL;
306                         rb_erase(&other->rb_node, &tree->state);
307                         free_extent_state(other);
308                 }
309         }
310         other_node = rb_next(&state->rb_node);
311         if (other_node) {
312                 other = rb_entry(other_node, struct extent_state, rb_node);
313                 if (other->start == state->end + 1 &&
314                     other->state == state->state) {
315                         other->start = state->start;
316                         state->tree = NULL;
317                         rb_erase(&state->rb_node, &tree->state);
318                         free_extent_state(state);
319                 }
320         }
321         return 0;
322 }
323
324 static void set_state_cb(struct extent_io_tree *tree,
325                          struct extent_state *state,
326                          unsigned long bits)
327 {
328         if (tree->ops && tree->ops->set_bit_hook) {
329                 tree->ops->set_bit_hook(tree->mapping->host, state->start,
330                                         state->end, state->state, bits);
331         }
332 }
333
334 static void clear_state_cb(struct extent_io_tree *tree,
335                            struct extent_state *state,
336                            unsigned long bits)
337 {
338         if (tree->ops && tree->ops->set_bit_hook) {
339                 tree->ops->clear_bit_hook(tree->mapping->host, state->start,
340                                           state->end, state->state, bits);
341         }
342 }
343
344 /*
345  * insert an extent_state struct into the tree.  'bits' are set on the
346  * struct before it is inserted.
347  *
348  * This may return -EEXIST if the extent is already there, in which case the
349  * state struct is freed.
350  *
351  * The tree lock is not taken internally.  This is a utility function and
352  * probably isn't what you want to call (see set/clear_extent_bit).
353  */
354 static int insert_state(struct extent_io_tree *tree,
355                         struct extent_state *state, u64 start, u64 end,
356                         int bits)
357 {
358         struct rb_node *node;
359
360         if (end < start) {
361                 printk("end < start %Lu %Lu\n", end, start);
362                 WARN_ON(1);
363         }
364         if (bits & EXTENT_DIRTY)
365                 tree->dirty_bytes += end - start + 1;
366         set_state_cb(tree, state, bits);
367         state->state |= bits;
368         state->start = start;
369         state->end = end;
370         node = tree_insert(&tree->state, end, &state->rb_node);
371         if (node) {
372                 struct extent_state *found;
373                 found = rb_entry(node, struct extent_state, rb_node);
374                 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
375                 free_extent_state(state);
376                 return -EEXIST;
377         }
378         state->tree = tree;
379         merge_state(tree, state);
380         return 0;
381 }
382
383 /*
384  * split a given extent state struct in two, inserting the preallocated
385  * struct 'prealloc' as the newly created second half.  'split' indicates an
386  * offset inside 'orig' where it should be split.
387  *
388  * Before calling,
389  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
390  * are two extent state structs in the tree:
391  * prealloc: [orig->start, split - 1]
392  * orig: [ split, orig->end ]
393  *
394  * The tree locks are not taken by this function. They need to be held
395  * by the caller.
396  */
397 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
398                        struct extent_state *prealloc, u64 split)
399 {
400         struct rb_node *node;
401         prealloc->start = orig->start;
402         prealloc->end = split - 1;
403         prealloc->state = orig->state;
404         orig->start = split;
405
406         node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
407         if (node) {
408                 struct extent_state *found;
409                 found = rb_entry(node, struct extent_state, rb_node);
410                 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
411                 free_extent_state(prealloc);
412                 return -EEXIST;
413         }
414         prealloc->tree = tree;
415         return 0;
416 }
417
418 /*
419  * utility function to clear some bits in an extent state struct.
420  * it will optionally wake up any one waiting on this state (wake == 1), or
421  * forcibly remove the state from the tree (delete == 1).
422  *
423  * If no bits are set on the state struct after clearing things, the
424  * struct is freed and removed from the tree
425  */
426 static int clear_state_bit(struct extent_io_tree *tree,
427                             struct extent_state *state, int bits, int wake,
428                             int delete)
429 {
430         int ret = state->state & bits;
431
432         if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
433                 u64 range = state->end - state->start + 1;
434                 WARN_ON(range > tree->dirty_bytes);
435                 tree->dirty_bytes -= range;
436         }
437         clear_state_cb(tree, state, bits);
438         state->state &= ~bits;
439         if (wake)
440                 wake_up(&state->wq);
441         if (delete || state->state == 0) {
442                 if (state->tree) {
443                         clear_state_cb(tree, state, state->state);
444                         rb_erase(&state->rb_node, &tree->state);
445                         state->tree = NULL;
446                         free_extent_state(state);
447                 } else {
448                         WARN_ON(1);
449                 }
450         } else {
451                 merge_state(tree, state);
452         }
453         return ret;
454 }
455
456 /*
457  * clear some bits on a range in the tree.  This may require splitting
458  * or inserting elements in the tree, so the gfp mask is used to
459  * indicate which allocations or sleeping are allowed.
460  *
461  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
462  * the given range from the tree regardless of state (ie for truncate).
463  *
464  * the range [start, end] is inclusive.
465  *
466  * This takes the tree lock, and returns < 0 on error, > 0 if any of the
467  * bits were already set, or zero if none of the bits were already set.
468  */
469 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
470                      int bits, int wake, int delete, gfp_t mask)
471 {
472         struct extent_state *state;
473         struct extent_state *prealloc = NULL;
474         struct rb_node *node;
475         unsigned long flags;
476         int err;
477         int set = 0;
478
479 again:
480         if (!prealloc && (mask & __GFP_WAIT)) {
481                 prealloc = alloc_extent_state(mask);
482                 if (!prealloc)
483                         return -ENOMEM;
484         }
485
486         spin_lock_irqsave(&tree->lock, flags);
487         /*
488          * this search will find the extents that end after
489          * our range starts
490          */
491         node = tree_search(tree, start);
492         if (!node)
493                 goto out;
494         state = rb_entry(node, struct extent_state, rb_node);
495         if (state->start > end)
496                 goto out;
497         WARN_ON(state->end < start);
498
499         /*
500          *     | ---- desired range ---- |
501          *  | state | or
502          *  | ------------- state -------------- |
503          *
504          * We need to split the extent we found, and may flip
505          * bits on second half.
506          *
507          * If the extent we found extends past our range, we
508          * just split and search again.  It'll get split again
509          * the next time though.
510          *
511          * If the extent we found is inside our range, we clear
512          * the desired bit on it.
513          */
514
515         if (state->start < start) {
516                 if (!prealloc)
517                         prealloc = alloc_extent_state(GFP_ATOMIC);
518                 err = split_state(tree, state, prealloc, start);
519                 BUG_ON(err == -EEXIST);
520                 prealloc = NULL;
521                 if (err)
522                         goto out;
523                 if (state->end <= end) {
524                         start = state->end + 1;
525                         set |= clear_state_bit(tree, state, bits,
526                                         wake, delete);
527                 } else {
528                         start = state->start;
529                 }
530                 goto search_again;
531         }
532         /*
533          * | ---- desired range ---- |
534          *                        | state |
535          * We need to split the extent, and clear the bit
536          * on the first half
537          */
538         if (state->start <= end && state->end > end) {
539                 if (!prealloc)
540                         prealloc = alloc_extent_state(GFP_ATOMIC);
541                 err = split_state(tree, state, prealloc, end + 1);
542                 BUG_ON(err == -EEXIST);
543
544                 if (wake)
545                         wake_up(&state->wq);
546                 set |= clear_state_bit(tree, prealloc, bits,
547                                        wake, delete);
548                 prealloc = NULL;
549                 goto out;
550         }
551
552         start = state->end + 1;
553         set |= clear_state_bit(tree, state, bits, wake, delete);
554         goto search_again;
555
556 out:
557         spin_unlock_irqrestore(&tree->lock, flags);
558         if (prealloc)
559                 free_extent_state(prealloc);
560
561         return set;
562
563 search_again:
564         if (start > end)
565                 goto out;
566         spin_unlock_irqrestore(&tree->lock, flags);
567         if (mask & __GFP_WAIT)
568                 cond_resched();
569         goto again;
570 }
571 EXPORT_SYMBOL(clear_extent_bit);
572
573 static int wait_on_state(struct extent_io_tree *tree,
574                          struct extent_state *state)
575 {
576         DEFINE_WAIT(wait);
577         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
578         spin_unlock_irq(&tree->lock);
579         schedule();
580         spin_lock_irq(&tree->lock);
581         finish_wait(&state->wq, &wait);
582         return 0;
583 }
584
585 /*
586  * waits for one or more bits to clear on a range in the state tree.
587  * The range [start, end] is inclusive.
588  * The tree lock is taken by this function
589  */
590 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
591 {
592         struct extent_state *state;
593         struct rb_node *node;
594
595         spin_lock_irq(&tree->lock);
596 again:
597         while (1) {
598                 /*
599                  * this search will find all the extents that end after
600                  * our range starts
601                  */
602                 node = tree_search(tree, start);
603                 if (!node)
604                         break;
605
606                 state = rb_entry(node, struct extent_state, rb_node);
607
608                 if (state->start > end)
609                         goto out;
610
611                 if (state->state & bits) {
612                         start = state->start;
613                         atomic_inc(&state->refs);
614                         wait_on_state(tree, state);
615                         free_extent_state(state);
616                         goto again;
617                 }
618                 start = state->end + 1;
619
620                 if (start > end)
621                         break;
622
623                 if (need_resched()) {
624                         spin_unlock_irq(&tree->lock);
625                         cond_resched();
626                         spin_lock_irq(&tree->lock);
627                 }
628         }
629 out:
630         spin_unlock_irq(&tree->lock);
631         return 0;
632 }
633 EXPORT_SYMBOL(wait_extent_bit);
634
635 static void set_state_bits(struct extent_io_tree *tree,
636                            struct extent_state *state,
637                            int bits)
638 {
639         if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
640                 u64 range = state->end - state->start + 1;
641                 tree->dirty_bytes += range;
642         }
643         set_state_cb(tree, state, bits);
644         state->state |= bits;
645 }
646
647 /*
648  * set some bits on a range in the tree.  This may require allocations
649  * or sleeping, so the gfp mask is used to indicate what is allowed.
650  *
651  * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
652  * range already has the desired bits set.  The start of the existing
653  * range is returned in failed_start in this case.
654  *
655  * [start, end] is inclusive
656  * This takes the tree lock.
657  */
658 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
659                    int exclusive, u64 *failed_start, gfp_t mask)
660 {
661         struct extent_state *state;
662         struct extent_state *prealloc = NULL;
663         struct rb_node *node;
664         unsigned long flags;
665         int err = 0;
666         int set;
667         u64 last_start;
668         u64 last_end;
669 again:
670         if (!prealloc && (mask & __GFP_WAIT)) {
671                 prealloc = alloc_extent_state(mask);
672                 if (!prealloc)
673                         return -ENOMEM;
674         }
675
676         spin_lock_irqsave(&tree->lock, flags);
677         /*
678          * this search will find all the extents that end after
679          * our range starts.
680          */
681         node = tree_search(tree, start);
682         if (!node) {
683                 err = insert_state(tree, prealloc, start, end, bits);
684                 prealloc = NULL;
685                 BUG_ON(err == -EEXIST);
686                 goto out;
687         }
688
689         state = rb_entry(node, struct extent_state, rb_node);
690         last_start = state->start;
691         last_end = state->end;
692
693         /*
694          * | ---- desired range ---- |
695          * | state |
696          *
697          * Just lock what we found and keep going
698          */
699         if (state->start == start && state->end <= end) {
700                 set = state->state & bits;
701                 if (set && exclusive) {
702                         *failed_start = state->start;
703                         err = -EEXIST;
704                         goto out;
705                 }
706                 set_state_bits(tree, state, bits);
707                 start = state->end + 1;
708                 merge_state(tree, state);
709                 goto search_again;
710         }
711
712         /*
713          *     | ---- desired range ---- |
714          * | state |
715          *   or
716          * | ------------- state -------------- |
717          *
718          * We need to split the extent we found, and may flip bits on
719          * second half.
720          *
721          * If the extent we found extends past our
722          * range, we just split and search again.  It'll get split
723          * again the next time though.
724          *
725          * If the extent we found is inside our range, we set the
726          * desired bit on it.
727          */
728         if (state->start < start) {
729                 set = state->state & bits;
730                 if (exclusive && set) {
731                         *failed_start = start;
732                         err = -EEXIST;
733                         goto out;
734                 }
735                 err = split_state(tree, state, prealloc, start);
736                 BUG_ON(err == -EEXIST);
737                 prealloc = NULL;
738                 if (err)
739                         goto out;
740                 if (state->end <= end) {
741                         set_state_bits(tree, state, bits);
742                         start = state->end + 1;
743                         merge_state(tree, state);
744                 } else {
745                         start = state->start;
746                 }
747                 goto search_again;
748         }
749         /*
750          * | ---- desired range ---- |
751          *     | state | or               | state |
752          *
753          * There's a hole, we need to insert something in it and
754          * ignore the extent we found.
755          */
756         if (state->start > start) {
757                 u64 this_end;
758                 if (end < last_start)
759                         this_end = end;
760                 else
761                         this_end = last_start -1;
762                 err = insert_state(tree, prealloc, start, this_end,
763                                    bits);
764                 prealloc = NULL;
765                 BUG_ON(err == -EEXIST);
766                 if (err)
767                         goto out;
768                 start = this_end + 1;
769                 goto search_again;
770         }
771         /*
772          * | ---- desired range ---- |
773          *                        | state |
774          * We need to split the extent, and set the bit
775          * on the first half
776          */
777         if (state->start <= end && state->end > end) {
778                 set = state->state & bits;
779                 if (exclusive && set) {
780                         *failed_start = start;
781                         err = -EEXIST;
782                         goto out;
783                 }
784                 err = split_state(tree, state, prealloc, end + 1);
785                 BUG_ON(err == -EEXIST);
786
787                 set_state_bits(tree, prealloc, bits);
788                 merge_state(tree, prealloc);
789                 prealloc = NULL;
790                 goto out;
791         }
792
793         goto search_again;
794
795 out:
796         spin_unlock_irqrestore(&tree->lock, flags);
797         if (prealloc)
798                 free_extent_state(prealloc);
799
800         return err;
801
802 search_again:
803         if (start > end)
804                 goto out;
805         spin_unlock_irqrestore(&tree->lock, flags);
806         if (mask & __GFP_WAIT)
807                 cond_resched();
808         goto again;
809 }
810 EXPORT_SYMBOL(set_extent_bit);
811
812 /* wrappers around set/clear extent bit */
813 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
814                      gfp_t mask)
815 {
816         return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
817                               mask);
818 }
819 EXPORT_SYMBOL(set_extent_dirty);
820
821 int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
822                        gfp_t mask)
823 {
824         return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
825 }
826 EXPORT_SYMBOL(set_extent_ordered);
827
828 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
829                     int bits, gfp_t mask)
830 {
831         return set_extent_bit(tree, start, end, bits, 0, NULL,
832                               mask);
833 }
834 EXPORT_SYMBOL(set_extent_bits);
835
836 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
837                       int bits, gfp_t mask)
838 {
839         return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
840 }
841 EXPORT_SYMBOL(clear_extent_bits);
842
843 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
844                      gfp_t mask)
845 {
846         return set_extent_bit(tree, start, end,
847                               EXTENT_DELALLOC | EXTENT_DIRTY,
848                               0, NULL, mask);
849 }
850 EXPORT_SYMBOL(set_extent_delalloc);
851
852 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
853                        gfp_t mask)
854 {
855         return clear_extent_bit(tree, start, end,
856                                 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
857 }
858 EXPORT_SYMBOL(clear_extent_dirty);
859
860 int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
861                          gfp_t mask)
862 {
863         return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
864 }
865 EXPORT_SYMBOL(clear_extent_ordered);
866
867 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
868                      gfp_t mask)
869 {
870         return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
871                               mask);
872 }
873 EXPORT_SYMBOL(set_extent_new);
874
875 int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
876                        gfp_t mask)
877 {
878         return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
879 }
880 EXPORT_SYMBOL(clear_extent_new);
881
882 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
883                         gfp_t mask)
884 {
885         return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
886                               mask);
887 }
888 EXPORT_SYMBOL(set_extent_uptodate);
889
890 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
891                           gfp_t mask)
892 {
893         return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
894 }
895 EXPORT_SYMBOL(clear_extent_uptodate);
896
897 int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
898                          gfp_t mask)
899 {
900         return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
901                               0, NULL, mask);
902 }
903 EXPORT_SYMBOL(set_extent_writeback);
904
905 int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
906                            gfp_t mask)
907 {
908         return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
909 }
910 EXPORT_SYMBOL(clear_extent_writeback);
911
912 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
913 {
914         return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
915 }
916 EXPORT_SYMBOL(wait_on_extent_writeback);
917
918 /*
919  * either insert or lock state struct between start and end use mask to tell
920  * us if waiting is desired.
921  */
922 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
923 {
924         int err;
925         u64 failed_start;
926         while (1) {
927                 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
928                                      &failed_start, mask);
929                 if (err == -EEXIST && (mask & __GFP_WAIT)) {
930                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
931                         start = failed_start;
932                 } else {
933                         break;
934                 }
935                 WARN_ON(start > end);
936         }
937         return err;
938 }
939 EXPORT_SYMBOL(lock_extent);
940
941 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
942                   gfp_t mask)
943 {
944         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
945 }
946 EXPORT_SYMBOL(unlock_extent);
947
948 /*
949  * helper function to set pages and extents in the tree dirty
950  */
951 int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
952 {
953         unsigned long index = start >> PAGE_CACHE_SHIFT;
954         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
955         struct page *page;
956
957         while (index <= end_index) {
958                 page = find_get_page(tree->mapping, index);
959                 BUG_ON(!page);
960                 __set_page_dirty_nobuffers(page);
961                 page_cache_release(page);
962                 index++;
963         }
964         set_extent_dirty(tree, start, end, GFP_NOFS);
965         return 0;
966 }
967 EXPORT_SYMBOL(set_range_dirty);
968
969 /*
970  * helper function to set both pages and extents in the tree writeback
971  */
972 int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
973 {
974         unsigned long index = start >> PAGE_CACHE_SHIFT;
975         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
976         struct page *page;
977
978         while (index <= end_index) {
979                 page = find_get_page(tree->mapping, index);
980                 BUG_ON(!page);
981                 set_page_writeback(page);
982                 page_cache_release(page);
983                 index++;
984         }
985         set_extent_writeback(tree, start, end, GFP_NOFS);
986         return 0;
987 }
988 EXPORT_SYMBOL(set_range_writeback);
989
990 /*
991  * find the first offset in the io tree with 'bits' set. zero is
992  * returned if we find something, and *start_ret and *end_ret are
993  * set to reflect the state struct that was found.
994  *
995  * If nothing was found, 1 is returned, < 0 on error
996  */
997 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
998                           u64 *start_ret, u64 *end_ret, int bits)
999 {
1000         struct rb_node *node;
1001         struct extent_state *state;
1002         int ret = 1;
1003
1004         spin_lock_irq(&tree->lock);
1005         /*
1006          * this search will find all the extents that end after
1007          * our range starts.
1008          */
1009         node = tree_search(tree, start);
1010         if (!node) {
1011                 goto out;
1012         }
1013
1014         while(1) {
1015                 state = rb_entry(node, struct extent_state, rb_node);
1016                 if (state->end >= start && (state->state & bits)) {
1017                         *start_ret = state->start;
1018                         *end_ret = state->end;
1019                         ret = 0;
1020                         break;
1021                 }
1022                 node = rb_next(node);
1023                 if (!node)
1024                         break;
1025         }
1026 out:
1027         spin_unlock_irq(&tree->lock);
1028         return ret;
1029 }
1030 EXPORT_SYMBOL(find_first_extent_bit);
1031
1032 /* find the first state struct with 'bits' set after 'start', and
1033  * return it.  tree->lock must be held.  NULL will returned if
1034  * nothing was found after 'start'
1035  */
1036 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1037                                                  u64 start, int bits)
1038 {
1039         struct rb_node *node;
1040         struct extent_state *state;
1041
1042         /*
1043          * this search will find all the extents that end after
1044          * our range starts.
1045          */
1046         node = tree_search(tree, start);
1047         if (!node) {
1048                 goto out;
1049         }
1050
1051         while(1) {
1052                 state = rb_entry(node, struct extent_state, rb_node);
1053                 if (state->end >= start && (state->state & bits)) {
1054                         return state;
1055                 }
1056                 node = rb_next(node);
1057                 if (!node)
1058                         break;
1059         }
1060 out:
1061         return NULL;
1062 }
1063 EXPORT_SYMBOL(find_first_extent_bit_state);
1064
1065 /*
1066  * find a contiguous range of bytes in the file marked as delalloc, not
1067  * more than 'max_bytes'.  start and end are used to return the range,
1068  *
1069  * 1 is returned if we find something, 0 if nothing was in the tree
1070  */
1071 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1072                                         u64 *start, u64 *end, u64 max_bytes)
1073 {
1074         struct rb_node *node;
1075         struct extent_state *state;
1076         u64 cur_start = *start;
1077         u64 found = 0;
1078         u64 total_bytes = 0;
1079
1080         spin_lock_irq(&tree->lock);
1081
1082         /*
1083          * this search will find all the extents that end after
1084          * our range starts.
1085          */
1086         node = tree_search(tree, cur_start);
1087         if (!node) {
1088                 if (!found)
1089                         *end = (u64)-1;
1090                 goto out;
1091         }
1092
1093         while(1) {
1094                 state = rb_entry(node, struct extent_state, rb_node);
1095                 if (found && (state->start != cur_start ||
1096                               (state->state & EXTENT_BOUNDARY))) {
1097                         goto out;
1098                 }
1099                 if (!(state->state & EXTENT_DELALLOC)) {
1100                         if (!found)
1101                                 *end = state->end;
1102                         goto out;
1103                 }
1104                 if (!found)
1105                         *start = state->start;
1106                 found++;
1107                 *end = state->end;
1108                 cur_start = state->end + 1;
1109                 node = rb_next(node);
1110                 if (!node)
1111                         break;
1112                 total_bytes += state->end - state->start + 1;
1113                 if (total_bytes >= max_bytes)
1114                         break;
1115         }
1116 out:
1117         spin_unlock_irq(&tree->lock);
1118         return found;
1119 }
1120
1121 static noinline int __unlock_for_delalloc(struct inode *inode,
1122                                           struct page *locked_page,
1123                                           u64 start, u64 end)
1124 {
1125         int ret;
1126         struct page *pages[16];
1127         unsigned long index = start >> PAGE_CACHE_SHIFT;
1128         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1129         unsigned long nr_pages = end_index - index + 1;
1130         int i;
1131
1132         if (index == locked_page->index && end_index == index)
1133                 return 0;
1134
1135         while(nr_pages > 0) {
1136                 ret = find_get_pages_contig(inode->i_mapping, index,
1137                                      min(nr_pages, ARRAY_SIZE(pages)), pages);
1138                 for (i = 0; i < ret; i++) {
1139                         if (pages[i] != locked_page)
1140                                 unlock_page(pages[i]);
1141                         page_cache_release(pages[i]);
1142                 }
1143                 nr_pages -= ret;
1144                 index += ret;
1145                 cond_resched();
1146         }
1147         return 0;
1148 }
1149
1150 static noinline int lock_delalloc_pages(struct inode *inode,
1151                                         struct page *locked_page,
1152                                         u64 delalloc_start,
1153                                         u64 delalloc_end)
1154 {
1155         unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1156         unsigned long start_index = index;
1157         unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1158         unsigned long pages_locked = 0;
1159         struct page *pages[16];
1160         unsigned long nrpages;
1161         int ret;
1162         int i;
1163
1164         /* the caller is responsible for locking the start index */
1165         if (index == locked_page->index && index == end_index)
1166                 return 0;
1167
1168         /* skip the page at the start index */
1169         nrpages = end_index - index + 1;
1170         while(nrpages > 0) {
1171                 ret = find_get_pages_contig(inode->i_mapping, index,
1172                                      min(nrpages, ARRAY_SIZE(pages)), pages);
1173                 if (ret == 0) {
1174                         ret = -EAGAIN;
1175                         goto done;
1176                 }
1177                 /* now we have an array of pages, lock them all */
1178                 for (i = 0; i < ret; i++) {
1179                         /*
1180                          * the caller is taking responsibility for
1181                          * locked_page
1182                          */
1183                         if (pages[i] != locked_page)
1184                                 lock_page(pages[i]);
1185                         page_cache_release(pages[i]);
1186                 }
1187                 pages_locked += ret;
1188                 nrpages -= ret;
1189                 index += ret;
1190                 cond_resched();
1191         }
1192         ret = 0;
1193 done:
1194         if (ret && pages_locked) {
1195                 __unlock_for_delalloc(inode, locked_page,
1196                               delalloc_start,
1197                               ((u64)(start_index + pages_locked - 1)) <<
1198                               PAGE_CACHE_SHIFT);
1199         }
1200         return ret;
1201 }
1202
1203 /*
1204  * find a contiguous range of bytes in the file marked as delalloc, not
1205  * more than 'max_bytes'.  start and end are used to return the range,
1206  *
1207  * 1 is returned if we find something, 0 if nothing was in the tree
1208  */
1209 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1210                                              struct extent_io_tree *tree,
1211                                              struct page *locked_page,
1212                                              u64 *start, u64 *end,
1213                                              u64 max_bytes)
1214 {
1215         u64 delalloc_start;
1216         u64 delalloc_end;
1217         u64 found;
1218         int ret;
1219         int loops = 0;
1220
1221 again:
1222         /* step one, find a bunch of delalloc bytes starting at start */
1223         delalloc_start = *start;
1224         delalloc_end = 0;
1225         found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1226                                     max_bytes);
1227         if (!found) {
1228                 *start = delalloc_start;
1229                 *end = delalloc_end;
1230                 return found;
1231         }
1232
1233         /*
1234          * make sure to limit the number of pages we try to lock down
1235          * if we're looping.
1236          */
1237         if (delalloc_end + 1 - delalloc_start > max_bytes && loops) {
1238                 delalloc_end = (delalloc_start + PAGE_CACHE_SIZE - 1) &
1239                         ~((u64)PAGE_CACHE_SIZE - 1);
1240         }
1241         /* step two, lock all the pages after the page that has start */
1242         ret = lock_delalloc_pages(inode, locked_page,
1243                                   delalloc_start, delalloc_end);
1244         if (ret == -EAGAIN) {
1245                 /* some of the pages are gone, lets avoid looping by
1246                  * shortening the size of the delalloc range we're searching
1247                  */
1248                 if (!loops) {
1249                         unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1250                         max_bytes = PAGE_CACHE_SIZE - offset;
1251                         loops = 1;
1252                         goto again;
1253                 } else {
1254                         found = 0;
1255                         goto out_failed;
1256                 }
1257         }
1258         BUG_ON(ret);
1259
1260         /* step three, lock the state bits for the whole range */
1261         lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
1262
1263         /* then test to make sure it is all still delalloc */
1264         ret = test_range_bit(tree, delalloc_start, delalloc_end,
1265                              EXTENT_DELALLOC, 1);
1266         if (!ret) {
1267                 unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
1268                 __unlock_for_delalloc(inode, locked_page,
1269                               delalloc_start, delalloc_end);
1270                 cond_resched();
1271                 goto again;
1272         }
1273         *start = delalloc_start;
1274         *end = delalloc_end;
1275 out_failed:
1276         return found;
1277 }
1278
1279 int extent_clear_unlock_delalloc(struct inode *inode,
1280                                 struct extent_io_tree *tree,
1281                                 u64 start, u64 end, struct page *locked_page,
1282                                 int clear_dirty, int set_writeback,
1283                                 int end_writeback)
1284 {
1285         int ret;
1286         struct page *pages[16];
1287         unsigned long index = start >> PAGE_CACHE_SHIFT;
1288         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1289         unsigned long nr_pages = end_index - index + 1;
1290         int i;
1291         int clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
1292
1293         if (clear_dirty)
1294                 clear_bits |= EXTENT_DIRTY;
1295
1296         clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
1297
1298         while(nr_pages > 0) {
1299                 ret = find_get_pages_contig(inode->i_mapping, index,
1300                                      min(nr_pages, ARRAY_SIZE(pages)), pages);
1301                 for (i = 0; i < ret; i++) {
1302                         if (pages[i] == locked_page) {
1303                                 page_cache_release(pages[i]);
1304                                 continue;
1305                         }
1306                         if (clear_dirty)
1307                                 clear_page_dirty_for_io(pages[i]);
1308                         if (set_writeback)
1309                                 set_page_writeback(pages[i]);
1310                         if (end_writeback)
1311                                 end_page_writeback(pages[i]);
1312                         unlock_page(pages[i]);
1313                         page_cache_release(pages[i]);
1314                 }
1315                 nr_pages -= ret;
1316                 index += ret;
1317                 cond_resched();
1318         }
1319         return 0;
1320 }
1321 EXPORT_SYMBOL(extent_clear_unlock_delalloc);
1322
1323 /*
1324  * count the number of bytes in the tree that have a given bit(s)
1325  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1326  * cached.  The total number found is returned.
1327  */
1328 u64 count_range_bits(struct extent_io_tree *tree,
1329                      u64 *start, u64 search_end, u64 max_bytes,
1330                      unsigned long bits)
1331 {
1332         struct rb_node *node;
1333         struct extent_state *state;
1334         u64 cur_start = *start;
1335         u64 total_bytes = 0;
1336         int found = 0;
1337
1338         if (search_end <= cur_start) {
1339                 printk("search_end %Lu start %Lu\n", search_end, cur_start);
1340                 WARN_ON(1);
1341                 return 0;
1342         }
1343
1344         spin_lock_irq(&tree->lock);
1345         if (cur_start == 0 && bits == EXTENT_DIRTY) {
1346                 total_bytes = tree->dirty_bytes;
1347                 goto out;
1348         }
1349         /*
1350          * this search will find all the extents that end after
1351          * our range starts.
1352          */
1353         node = tree_search(tree, cur_start);
1354         if (!node) {
1355                 goto out;
1356         }
1357
1358         while(1) {
1359                 state = rb_entry(node, struct extent_state, rb_node);
1360                 if (state->start > search_end)
1361                         break;
1362                 if (state->end >= cur_start && (state->state & bits)) {
1363                         total_bytes += min(search_end, state->end) + 1 -
1364                                        max(cur_start, state->start);
1365                         if (total_bytes >= max_bytes)
1366                                 break;
1367                         if (!found) {
1368                                 *start = state->start;
1369                                 found = 1;
1370                         }
1371                 }
1372                 node = rb_next(node);
1373                 if (!node)
1374                         break;
1375         }
1376 out:
1377         spin_unlock_irq(&tree->lock);
1378         return total_bytes;
1379 }
1380 /*
1381  * helper function to lock both pages and extents in the tree.
1382  * pages must be locked first.
1383  */
1384 int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1385 {
1386         unsigned long index = start >> PAGE_CACHE_SHIFT;
1387         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1388         struct page *page;
1389         int err;
1390
1391         while (index <= end_index) {
1392                 page = grab_cache_page(tree->mapping, index);
1393                 if (!page) {
1394                         err = -ENOMEM;
1395                         goto failed;
1396                 }
1397                 if (IS_ERR(page)) {
1398                         err = PTR_ERR(page);
1399                         goto failed;
1400                 }
1401                 index++;
1402         }
1403         lock_extent(tree, start, end, GFP_NOFS);
1404         return 0;
1405
1406 failed:
1407         /*
1408          * we failed above in getting the page at 'index', so we undo here
1409          * up to but not including the page at 'index'
1410          */
1411         end_index = index;
1412         index = start >> PAGE_CACHE_SHIFT;
1413         while (index < end_index) {
1414                 page = find_get_page(tree->mapping, index);
1415                 unlock_page(page);
1416                 page_cache_release(page);
1417                 index++;
1418         }
1419         return err;
1420 }
1421 EXPORT_SYMBOL(lock_range);
1422
1423 /*
1424  * helper function to unlock both pages and extents in the tree.
1425  */
1426 int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1427 {
1428         unsigned long index = start >> PAGE_CACHE_SHIFT;
1429         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1430         struct page *page;
1431
1432         while (index <= end_index) {
1433                 page = find_get_page(tree->mapping, index);
1434                 unlock_page(page);
1435                 page_cache_release(page);
1436                 index++;
1437         }
1438         unlock_extent(tree, start, end, GFP_NOFS);
1439         return 0;
1440 }
1441 EXPORT_SYMBOL(unlock_range);
1442
1443 /*
1444  * set the private field for a given byte offset in the tree.  If there isn't
1445  * an extent_state there already, this does nothing.
1446  */
1447 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1448 {
1449         struct rb_node *node;
1450         struct extent_state *state;
1451         int ret = 0;
1452
1453         spin_lock_irq(&tree->lock);
1454         /*
1455          * this search will find all the extents that end after
1456          * our range starts.
1457          */
1458         node = tree_search(tree, start);
1459         if (!node) {
1460                 ret = -ENOENT;
1461                 goto out;
1462         }
1463         state = rb_entry(node, struct extent_state, rb_node);
1464         if (state->start != start) {
1465                 ret = -ENOENT;
1466                 goto out;
1467         }
1468         state->private = private;
1469 out:
1470         spin_unlock_irq(&tree->lock);
1471         return ret;
1472 }
1473
1474 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1475 {
1476         struct rb_node *node;
1477         struct extent_state *state;
1478         int ret = 0;
1479
1480         spin_lock_irq(&tree->lock);
1481         /*
1482          * this search will find all the extents that end after
1483          * our range starts.
1484          */
1485         node = tree_search(tree, start);
1486         if (!node) {
1487                 ret = -ENOENT;
1488                 goto out;
1489         }
1490         state = rb_entry(node, struct extent_state, rb_node);
1491         if (state->start != start) {
1492                 ret = -ENOENT;
1493                 goto out;
1494         }
1495         *private = state->private;
1496 out:
1497         spin_unlock_irq(&tree->lock);
1498         return ret;
1499 }
1500
1501 /*
1502  * searches a range in the state tree for a given mask.
1503  * If 'filled' == 1, this returns 1 only if every extent in the tree
1504  * has the bits set.  Otherwise, 1 is returned if any bit in the
1505  * range is found set.
1506  */
1507 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1508                    int bits, int filled)
1509 {
1510         struct extent_state *state = NULL;
1511         struct rb_node *node;
1512         int bitset = 0;
1513         unsigned long flags;
1514
1515         spin_lock_irqsave(&tree->lock, flags);
1516         node = tree_search(tree, start);
1517         while (node && start <= end) {
1518                 state = rb_entry(node, struct extent_state, rb_node);
1519
1520                 if (filled && state->start > start) {
1521                         bitset = 0;
1522                         break;
1523                 }
1524
1525                 if (state->start > end)
1526                         break;
1527
1528                 if (state->state & bits) {
1529                         bitset = 1;
1530                         if (!filled)
1531                                 break;
1532                 } else if (filled) {
1533                         bitset = 0;
1534                         break;
1535                 }
1536                 start = state->end + 1;
1537                 if (start > end)
1538                         break;
1539                 node = rb_next(node);
1540                 if (!node) {
1541                         if (filled)
1542                                 bitset = 0;
1543                         break;
1544                 }
1545         }
1546         spin_unlock_irqrestore(&tree->lock, flags);
1547         return bitset;
1548 }
1549 EXPORT_SYMBOL(test_range_bit);
1550
1551 /*
1552  * helper function to set a given page up to date if all the
1553  * extents in the tree for that page are up to date
1554  */
1555 static int check_page_uptodate(struct extent_io_tree *tree,
1556                                struct page *page)
1557 {
1558         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1559         u64 end = start + PAGE_CACHE_SIZE - 1;
1560         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1561                 SetPageUptodate(page);
1562         return 0;
1563 }
1564
1565 /*
1566  * helper function to unlock a page if all the extents in the tree
1567  * for that page are unlocked
1568  */
1569 static int check_page_locked(struct extent_io_tree *tree,
1570                              struct page *page)
1571 {
1572         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1573         u64 end = start + PAGE_CACHE_SIZE - 1;
1574         if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1575                 unlock_page(page);
1576         return 0;
1577 }
1578
1579 /*
1580  * helper function to end page writeback if all the extents
1581  * in the tree for that page are done with writeback
1582  */
1583 static int check_page_writeback(struct extent_io_tree *tree,
1584                              struct page *page)
1585 {
1586         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1587         u64 end = start + PAGE_CACHE_SIZE - 1;
1588         if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1589                 end_page_writeback(page);
1590         return 0;
1591 }
1592
1593 /* lots and lots of room for performance fixes in the end_bio funcs */
1594
1595 /*
1596  * after a writepage IO is done, we need to:
1597  * clear the uptodate bits on error
1598  * clear the writeback bits in the extent tree for this IO
1599  * end_page_writeback if the page has no more pending IO
1600  *
1601  * Scheduling is not allowed, so the extent state tree is expected
1602  * to have one and only one object corresponding to this IO.
1603  */
1604 static void end_bio_extent_writepage(struct bio *bio, int err)
1605 {
1606         int uptodate = err == 0;
1607         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1608         struct extent_io_tree *tree;
1609         u64 start;
1610         u64 end;
1611         int whole_page;
1612         int ret;
1613
1614         do {
1615                 struct page *page = bvec->bv_page;
1616                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1617
1618                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1619                          bvec->bv_offset;
1620                 end = start + bvec->bv_len - 1;
1621
1622                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1623                         whole_page = 1;
1624                 else
1625                         whole_page = 0;
1626
1627                 if (--bvec >= bio->bi_io_vec)
1628                         prefetchw(&bvec->bv_page->flags);
1629                 if (tree->ops && tree->ops->writepage_end_io_hook) {
1630                         ret = tree->ops->writepage_end_io_hook(page, start,
1631                                                        end, NULL, uptodate);
1632                         if (ret)
1633                                 uptodate = 0;
1634                 }
1635
1636                 if (!uptodate && tree->ops &&
1637                     tree->ops->writepage_io_failed_hook) {
1638                         ret = tree->ops->writepage_io_failed_hook(bio, page,
1639                                                          start, end, NULL);
1640                         if (ret == 0) {
1641                                 uptodate = (err == 0);
1642                                 continue;
1643                         }
1644                 }
1645
1646                 if (!uptodate) {
1647                         clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1648                         ClearPageUptodate(page);
1649                         SetPageError(page);
1650                 }
1651
1652                 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1653
1654                 if (whole_page)
1655                         end_page_writeback(page);
1656                 else
1657                         check_page_writeback(tree, page);
1658         } while (bvec >= bio->bi_io_vec);
1659
1660         bio_put(bio);
1661 }
1662
1663 /*
1664  * after a readpage IO is done, we need to:
1665  * clear the uptodate bits on error
1666  * set the uptodate bits if things worked
1667  * set the page up to date if all extents in the tree are uptodate
1668  * clear the lock bit in the extent tree
1669  * unlock the page if there are no other extents locked for it
1670  *
1671  * Scheduling is not allowed, so the extent state tree is expected
1672  * to have one and only one object corresponding to this IO.
1673  */
1674 static void end_bio_extent_readpage(struct bio *bio, int err)
1675 {
1676         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1677         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1678         struct extent_io_tree *tree;
1679         u64 start;
1680         u64 end;
1681         int whole_page;
1682         int ret;
1683
1684         do {
1685                 struct page *page = bvec->bv_page;
1686                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1687
1688                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1689                         bvec->bv_offset;
1690                 end = start + bvec->bv_len - 1;
1691
1692                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1693                         whole_page = 1;
1694                 else
1695                         whole_page = 0;
1696
1697                 if (--bvec >= bio->bi_io_vec)
1698                         prefetchw(&bvec->bv_page->flags);
1699
1700                 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1701                         ret = tree->ops->readpage_end_io_hook(page, start, end,
1702                                                               NULL);
1703                         if (ret)
1704                                 uptodate = 0;
1705                 }
1706                 if (!uptodate && tree->ops &&
1707                     tree->ops->readpage_io_failed_hook) {
1708                         ret = tree->ops->readpage_io_failed_hook(bio, page,
1709                                                          start, end, NULL);
1710                         if (ret == 0) {
1711                                 uptodate =
1712                                         test_bit(BIO_UPTODATE, &bio->bi_flags);
1713                                 continue;
1714                         }
1715                 }
1716
1717                 if (uptodate)
1718                         set_extent_uptodate(tree, start, end,
1719                                             GFP_ATOMIC);
1720                 unlock_extent(tree, start, end, GFP_ATOMIC);
1721
1722                 if (whole_page) {
1723                         if (uptodate) {
1724                                 SetPageUptodate(page);
1725                         } else {
1726                                 ClearPageUptodate(page);
1727                                 SetPageError(page);
1728                         }
1729                         unlock_page(page);
1730                 } else {
1731                         if (uptodate) {
1732                                 check_page_uptodate(tree, page);
1733                         } else {
1734                                 ClearPageUptodate(page);
1735                                 SetPageError(page);
1736                         }
1737                         check_page_locked(tree, page);
1738                 }
1739         } while (bvec >= bio->bi_io_vec);
1740
1741         bio_put(bio);
1742 }
1743
1744 /*
1745  * IO done from prepare_write is pretty simple, we just unlock
1746  * the structs in the extent tree when done, and set the uptodate bits
1747  * as appropriate.
1748  */
1749 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1750 {
1751         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1752         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1753         struct extent_io_tree *tree;
1754         u64 start;
1755         u64 end;
1756
1757         do {
1758                 struct page *page = bvec->bv_page;
1759                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1760
1761                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1762                         bvec->bv_offset;
1763                 end = start + bvec->bv_len - 1;
1764
1765                 if (--bvec >= bio->bi_io_vec)
1766                         prefetchw(&bvec->bv_page->flags);
1767
1768                 if (uptodate) {
1769                         set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1770                 } else {
1771                         ClearPageUptodate(page);
1772                         SetPageError(page);
1773                 }
1774
1775                 unlock_extent(tree, start, end, GFP_ATOMIC);
1776
1777         } while (bvec >= bio->bi_io_vec);
1778
1779         bio_put(bio);
1780 }
1781
1782 static struct bio *
1783 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1784                  gfp_t gfp_flags)
1785 {
1786         struct bio *bio;
1787
1788         bio = bio_alloc(gfp_flags, nr_vecs);
1789
1790         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1791                 while (!bio && (nr_vecs /= 2))
1792                         bio = bio_alloc(gfp_flags, nr_vecs);
1793         }
1794
1795         if (bio) {
1796                 bio->bi_size = 0;
1797                 bio->bi_bdev = bdev;
1798                 bio->bi_sector = first_sector;
1799         }
1800         return bio;
1801 }
1802
1803 static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1804                           unsigned long bio_flags)
1805 {
1806         int ret = 0;
1807         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1808         struct page *page = bvec->bv_page;
1809         struct extent_io_tree *tree = bio->bi_private;
1810         u64 start;
1811         u64 end;
1812
1813         start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1814         end = start + bvec->bv_len - 1;
1815
1816         bio->bi_private = NULL;
1817
1818         bio_get(bio);
1819
1820         if (tree->ops && tree->ops->submit_bio_hook)
1821                 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1822                                            mirror_num, bio_flags);
1823         else
1824                 submit_bio(rw, bio);
1825         if (bio_flagged(bio, BIO_EOPNOTSUPP))
1826                 ret = -EOPNOTSUPP;
1827         bio_put(bio);
1828         return ret;
1829 }
1830
1831 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1832                               struct page *page, sector_t sector,
1833                               size_t size, unsigned long offset,
1834                               struct block_device *bdev,
1835                               struct bio **bio_ret,
1836                               unsigned long max_pages,
1837                               bio_end_io_t end_io_func,
1838                               int mirror_num,
1839                               unsigned long prev_bio_flags,
1840                               unsigned long bio_flags)
1841 {
1842         int ret = 0;
1843         struct bio *bio;
1844         int nr;
1845         int contig = 0;
1846         int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1847         int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
1848         size_t page_size = min(size, PAGE_CACHE_SIZE);
1849
1850         if (bio_ret && *bio_ret) {
1851                 bio = *bio_ret;
1852                 if (old_compressed)
1853                         contig = bio->bi_sector == sector;
1854                 else
1855                         contig = bio->bi_sector + (bio->bi_size >> 9) ==
1856                                 sector;
1857
1858                 if (prev_bio_flags != bio_flags || !contig ||
1859                     (tree->ops && tree->ops->merge_bio_hook &&
1860                      tree->ops->merge_bio_hook(page, offset, page_size, bio,
1861                                                bio_flags)) ||
1862                     bio_add_page(bio, page, page_size, offset) < page_size) {
1863                         ret = submit_one_bio(rw, bio, mirror_num,
1864                                              prev_bio_flags);
1865                         bio = NULL;
1866                 } else {
1867                         return 0;
1868                 }
1869         }
1870         if (this_compressed)
1871                 nr = BIO_MAX_PAGES;
1872         else
1873                 nr = bio_get_nr_vecs(bdev);
1874
1875         bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1876         if (!bio) {
1877                 printk("failed to allocate bio nr %d\n", nr);
1878         }
1879
1880         bio_add_page(bio, page, page_size, offset);
1881         bio->bi_end_io = end_io_func;
1882         bio->bi_private = tree;
1883
1884         if (bio_ret) {
1885                 *bio_ret = bio;
1886         } else {
1887                 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
1888         }
1889
1890         return ret;
1891 }
1892
1893 void set_page_extent_mapped(struct page *page)
1894 {
1895         if (!PagePrivate(page)) {
1896                 SetPagePrivate(page);
1897                 page_cache_get(page);
1898                 set_page_private(page, EXTENT_PAGE_PRIVATE);
1899         }
1900 }
1901
1902 void set_page_extent_head(struct page *page, unsigned long len)
1903 {
1904         set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1905 }
1906
1907 /*
1908  * basic readpage implementation.  Locked extent state structs are inserted
1909  * into the tree that are removed when the IO is done (by the end_io
1910  * handlers)
1911  */
1912 static int __extent_read_full_page(struct extent_io_tree *tree,
1913                                    struct page *page,
1914                                    get_extent_t *get_extent,
1915                                    struct bio **bio, int mirror_num,
1916                                    unsigned long *bio_flags)
1917 {
1918         struct inode *inode = page->mapping->host;
1919         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1920         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1921         u64 end;
1922         u64 cur = start;
1923         u64 extent_offset;
1924         u64 last_byte = i_size_read(inode);
1925         u64 block_start;
1926         u64 cur_end;
1927         sector_t sector;
1928         struct extent_map *em;
1929         struct block_device *bdev;
1930         int ret;
1931         int nr = 0;
1932         size_t page_offset = 0;
1933         size_t iosize;
1934         size_t disk_io_size;
1935         size_t blocksize = inode->i_sb->s_blocksize;
1936         unsigned long this_bio_flag = 0;
1937
1938         set_page_extent_mapped(page);
1939
1940         end = page_end;
1941         lock_extent(tree, start, end, GFP_NOFS);
1942
1943         if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1944                 char *userpage;
1945                 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
1946
1947                 if (zero_offset) {
1948                         iosize = PAGE_CACHE_SIZE - zero_offset;
1949                         userpage = kmap_atomic(page, KM_USER0);
1950                         memset(userpage + zero_offset, 0, iosize);
1951                         flush_dcache_page(page);
1952                         kunmap_atomic(userpage, KM_USER0);
1953                 }
1954         }
1955         while (cur <= end) {
1956                 if (cur >= last_byte) {
1957                         char *userpage;
1958                         iosize = PAGE_CACHE_SIZE - page_offset;
1959                         userpage = kmap_atomic(page, KM_USER0);
1960                         memset(userpage + page_offset, 0, iosize);
1961                         flush_dcache_page(page);
1962                         kunmap_atomic(userpage, KM_USER0);
1963                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1964                                             GFP_NOFS);
1965                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1966                         break;
1967                 }
1968                 em = get_extent(inode, page, page_offset, cur,
1969                                 end - cur + 1, 0);
1970                 if (IS_ERR(em) || !em) {
1971                         SetPageError(page);
1972                         unlock_extent(tree, cur, end, GFP_NOFS);
1973                         break;
1974                 }
1975                 extent_offset = cur - em->start;
1976                 if (extent_map_end(em) <= cur) {
1977 printk("bad mapping em [%Lu %Lu] cur %Lu\n", em->start, extent_map_end(em), cur);
1978                 }
1979                 BUG_ON(extent_map_end(em) <= cur);
1980                 if (end < cur) {
1981 printk("2bad mapping end %Lu cur %Lu\n", end, cur);
1982                 }
1983                 BUG_ON(end < cur);
1984
1985                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
1986                         this_bio_flag = EXTENT_BIO_COMPRESSED;
1987
1988                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1989                 cur_end = min(extent_map_end(em) - 1, end);
1990                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1991                 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
1992                         disk_io_size = em->block_len;
1993                         sector = em->block_start >> 9;
1994                 } else {
1995                         sector = (em->block_start + extent_offset) >> 9;
1996                         disk_io_size = iosize;
1997                 }
1998                 bdev = em->bdev;
1999                 block_start = em->block_start;
2000                 free_extent_map(em);
2001                 em = NULL;
2002
2003                 /* we've found a hole, just zero and go on */
2004                 if (block_start == EXTENT_MAP_HOLE) {
2005                         char *userpage;
2006                         userpage = kmap_atomic(page, KM_USER0);
2007                         memset(userpage + page_offset, 0, iosize);
2008                         flush_dcache_page(page);
2009                         kunmap_atomic(userpage, KM_USER0);
2010
2011                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2012                                             GFP_NOFS);
2013                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2014                         cur = cur + iosize;
2015                         page_offset += iosize;
2016                         continue;
2017                 }
2018                 /* the get_extent function already copied into the page */
2019                 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
2020                         check_page_uptodate(tree, page);
2021                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2022                         cur = cur + iosize;
2023                         page_offset += iosize;
2024                         continue;
2025                 }
2026                 /* we have an inline extent but it didn't get marked up
2027                  * to date.  Error out
2028                  */
2029                 if (block_start == EXTENT_MAP_INLINE) {
2030                         SetPageError(page);
2031                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2032                         cur = cur + iosize;
2033                         page_offset += iosize;
2034                         continue;
2035                 }
2036
2037                 ret = 0;
2038                 if (tree->ops && tree->ops->readpage_io_hook) {
2039                         ret = tree->ops->readpage_io_hook(page, cur,
2040                                                           cur + iosize - 1);
2041                 }
2042                 if (!ret) {
2043                         unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2044                         pnr -= page->index;
2045                         ret = submit_extent_page(READ, tree, page,
2046                                          sector, disk_io_size, page_offset,
2047                                          bdev, bio, pnr,
2048                                          end_bio_extent_readpage, mirror_num,
2049                                          *bio_flags,
2050                                          this_bio_flag);
2051                         nr++;
2052                         *bio_flags = this_bio_flag;
2053                 }
2054                 if (ret)
2055                         SetPageError(page);
2056                 cur = cur + iosize;
2057                 page_offset += iosize;
2058         }
2059         if (!nr) {
2060                 if (!PageError(page))
2061                         SetPageUptodate(page);
2062                 unlock_page(page);
2063         }
2064         return 0;
2065 }
2066
2067 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2068                             get_extent_t *get_extent)
2069 {
2070         struct bio *bio = NULL;
2071         unsigned long bio_flags = 0;
2072         int ret;
2073
2074         ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2075                                       &bio_flags);
2076         if (bio)
2077                 submit_one_bio(READ, bio, 0, bio_flags);
2078         return ret;
2079 }
2080 EXPORT_SYMBOL(extent_read_full_page);
2081
2082 /*
2083  * the writepage semantics are similar to regular writepage.  extent
2084  * records are inserted to lock ranges in the tree, and as dirty areas
2085  * are found, they are marked writeback.  Then the lock bits are removed
2086  * and the end_io handler clears the writeback ranges
2087  */
2088 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2089                               void *data)
2090 {
2091         struct inode *inode = page->mapping->host;
2092         struct extent_page_data *epd = data;
2093         struct extent_io_tree *tree = epd->tree;
2094         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2095         u64 delalloc_start;
2096         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2097         u64 end;
2098         u64 cur = start;
2099         u64 extent_offset;
2100         u64 last_byte = i_size_read(inode);
2101         u64 block_start;
2102         u64 iosize;
2103         u64 unlock_start;
2104         sector_t sector;
2105         struct extent_map *em;
2106         struct block_device *bdev;
2107         int ret;
2108         int nr = 0;
2109         size_t pg_offset = 0;
2110         size_t blocksize;
2111         loff_t i_size = i_size_read(inode);
2112         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2113         u64 nr_delalloc;
2114         u64 delalloc_end;
2115         int page_started;
2116         int compressed;
2117
2118         WARN_ON(!PageLocked(page));
2119         pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2120         if (page->index > end_index ||
2121            (page->index == end_index && !pg_offset)) {
2122                 page->mapping->a_ops->invalidatepage(page, 0);
2123                 unlock_page(page);
2124                 return 0;
2125         }
2126
2127         if (page->index == end_index) {
2128                 char *userpage;
2129
2130                 userpage = kmap_atomic(page, KM_USER0);
2131                 memset(userpage + pg_offset, 0,
2132                        PAGE_CACHE_SIZE - pg_offset);
2133                 kunmap_atomic(userpage, KM_USER0);
2134                 flush_dcache_page(page);
2135         }
2136         pg_offset = 0;
2137
2138         set_page_extent_mapped(page);
2139
2140         delalloc_start = start;
2141         delalloc_end = 0;
2142         page_started = 0;
2143         while(delalloc_end < page_end) {
2144                 nr_delalloc = find_lock_delalloc_range(inode, tree,
2145                                                        page,
2146                                                        &delalloc_start,
2147                                                        &delalloc_end,
2148                                                        128 * 1024 * 1024);
2149                 if (nr_delalloc == 0) {
2150                         delalloc_start = delalloc_end + 1;
2151                         continue;
2152                 }
2153                 tree->ops->fill_delalloc(inode, page, delalloc_start,
2154                                          delalloc_end, &page_started);
2155                 delalloc_start = delalloc_end + 1;
2156         }
2157
2158         /* did the fill delalloc function already unlock and start the IO? */
2159         if (page_started) {
2160                 return 0;
2161         }
2162
2163         lock_extent(tree, start, page_end, GFP_NOFS);
2164         unlock_start = start;
2165
2166         if (tree->ops && tree->ops->writepage_start_hook) {
2167                 ret = tree->ops->writepage_start_hook(page, start,
2168                                                       page_end);
2169                 if (ret == -EAGAIN) {
2170                         unlock_extent(tree, start, page_end, GFP_NOFS);
2171                         redirty_page_for_writepage(wbc, page);
2172                         unlock_page(page);
2173                         return 0;
2174                 }
2175         }
2176
2177         end = page_end;
2178         if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
2179                 printk("found delalloc bits after lock_extent\n");
2180         }
2181
2182         if (last_byte <= start) {
2183                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
2184                 unlock_extent(tree, start, page_end, GFP_NOFS);
2185                 if (tree->ops && tree->ops->writepage_end_io_hook)
2186                         tree->ops->writepage_end_io_hook(page, start,
2187                                                          page_end, NULL, 1);
2188                 unlock_start = page_end + 1;
2189                 goto done;
2190         }
2191
2192         set_extent_uptodate(tree, start, page_end, GFP_NOFS);
2193         blocksize = inode->i_sb->s_blocksize;
2194
2195         while (cur <= end) {
2196                 if (cur >= last_byte) {
2197                         clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
2198                         unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2199                         if (tree->ops && tree->ops->writepage_end_io_hook)
2200                                 tree->ops->writepage_end_io_hook(page, cur,
2201                                                          page_end, NULL, 1);
2202                         unlock_start = page_end + 1;
2203                         break;
2204                 }
2205                 em = epd->get_extent(inode, page, pg_offset, cur,
2206                                      end - cur + 1, 1);
2207                 if (IS_ERR(em) || !em) {
2208                         SetPageError(page);
2209                         break;
2210                 }
2211
2212                 extent_offset = cur - em->start;
2213                 BUG_ON(extent_map_end(em) <= cur);
2214                 BUG_ON(end < cur);
2215                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2216                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2217                 sector = (em->block_start + extent_offset) >> 9;
2218                 bdev = em->bdev;
2219                 block_start = em->block_start;
2220                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2221                 free_extent_map(em);
2222                 em = NULL;
2223
2224                 /*
2225                  * compressed and inline extents are written through other
2226                  * paths in the FS
2227                  */
2228                 if (compressed || block_start == EXTENT_MAP_HOLE ||
2229                     block_start == EXTENT_MAP_INLINE) {
2230                         clear_extent_dirty(tree, cur,
2231                                            cur + iosize - 1, GFP_NOFS);
2232
2233                         unlock_extent(tree, unlock_start, cur + iosize -1,
2234                                       GFP_NOFS);
2235
2236                         /*
2237                          * end_io notification does not happen here for
2238                          * compressed extents
2239                          */
2240                         if (!compressed && tree->ops &&
2241                             tree->ops->writepage_end_io_hook)
2242                                 tree->ops->writepage_end_io_hook(page, cur,
2243                                                          cur + iosize - 1,
2244                                                          NULL, 1);
2245                         else if (compressed) {
2246                                 /* we don't want to end_page_writeback on
2247                                  * a compressed extent.  this happens
2248                                  * elsewhere
2249                                  */
2250                                 nr++;
2251                         }
2252
2253                         cur += iosize;
2254                         pg_offset += iosize;
2255                         unlock_start = cur;
2256                         continue;
2257                 }
2258                 /* leave this out until we have a page_mkwrite call */
2259                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2260                                    EXTENT_DIRTY, 0)) {
2261                         cur = cur + iosize;
2262                         pg_offset += iosize;
2263                         continue;
2264                 }
2265
2266                 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2267                 if (tree->ops && tree->ops->writepage_io_hook) {
2268                         ret = tree->ops->writepage_io_hook(page, cur,
2269                                                 cur + iosize - 1);
2270                 } else {
2271                         ret = 0;
2272                 }
2273                 if (ret) {
2274                         SetPageError(page);
2275                 } else {
2276                         unsigned long max_nr = end_index + 1;
2277
2278                         set_range_writeback(tree, cur, cur + iosize - 1);
2279                         if (!PageWriteback(page)) {
2280                                 printk("warning page %lu not writeback, "
2281                                        "cur %llu end %llu\n", page->index,
2282                                        (unsigned long long)cur,
2283                                        (unsigned long long)end);
2284                         }
2285
2286                         ret = submit_extent_page(WRITE, tree, page, sector,
2287                                                  iosize, pg_offset, bdev,
2288                                                  &epd->bio, max_nr,
2289                                                  end_bio_extent_writepage,
2290                                                  0, 0, 0);
2291                         if (ret)
2292                                 SetPageError(page);
2293                 }
2294                 cur = cur + iosize;
2295                 pg_offset += iosize;
2296                 nr++;
2297         }
2298 done:
2299         if (nr == 0) {
2300                 /* make sure the mapping tag for page dirty gets cleared */
2301                 set_page_writeback(page);
2302                 end_page_writeback(page);
2303         }
2304         if (unlock_start <= page_end)
2305                 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2306         unlock_page(page);
2307         return 0;
2308 }
2309
2310 /**
2311  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2312  * @mapping: address space structure to write
2313  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2314  * @writepage: function called for each page
2315  * @data: data passed to writepage function
2316  *
2317  * If a page is already under I/O, write_cache_pages() skips it, even
2318  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2319  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2320  * and msync() need to guarantee that all the data which was dirty at the time
2321  * the call was made get new I/O started against them.  If wbc->sync_mode is
2322  * WB_SYNC_ALL then we were called for data integrity and we must wait for
2323  * existing IO to complete.
2324  */
2325 int extent_write_cache_pages(struct extent_io_tree *tree,
2326                              struct address_space *mapping,
2327                              struct writeback_control *wbc,
2328                              writepage_t writepage, void *data)
2329 {
2330         struct backing_dev_info *bdi = mapping->backing_dev_info;
2331         int ret = 0;
2332         int done = 0;
2333         struct pagevec pvec;
2334         int nr_pages;
2335         pgoff_t index;
2336         pgoff_t end;            /* Inclusive */
2337         int scanned = 0;
2338         int range_whole = 0;
2339
2340         if (wbc->nonblocking && bdi_write_congested(bdi)) {
2341                 wbc->encountered_congestion = 1;
2342                 return 0;
2343         }
2344
2345         pagevec_init(&pvec, 0);
2346         if (wbc->range_cyclic) {
2347                 index = mapping->writeback_index; /* Start from prev offset */
2348                 end = -1;
2349         } else {
2350                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2351                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2352                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2353                         range_whole = 1;
2354                 scanned = 1;
2355         }
2356 retry:
2357         while (!done && (index <= end) &&
2358                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2359                                               PAGECACHE_TAG_DIRTY,
2360                                               min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2361                 unsigned i;
2362
2363                 scanned = 1;
2364                 for (i = 0; i < nr_pages; i++) {
2365                         struct page *page = pvec.pages[i];
2366
2367                         /*
2368                          * At this point we hold neither mapping->tree_lock nor
2369                          * lock on the page itself: the page may be truncated or
2370                          * invalidated (changing page->mapping to NULL), or even
2371                          * swizzled back from swapper_space to tmpfs file
2372                          * mapping
2373                          */
2374                         if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2375                                 tree->ops->write_cache_pages_lock_hook(page);
2376                         else
2377                                 lock_page(page);
2378
2379                         if (unlikely(page->mapping != mapping)) {
2380                                 unlock_page(page);
2381                                 continue;
2382                         }
2383
2384                         if (!wbc->range_cyclic && page->index > end) {
2385                                 done = 1;
2386                                 unlock_page(page);
2387                                 continue;
2388                         }
2389
2390                         if (wbc->sync_mode != WB_SYNC_NONE)
2391                                 wait_on_page_writeback(page);
2392
2393                         if (PageWriteback(page) ||
2394                             !clear_page_dirty_for_io(page)) {
2395                                 unlock_page(page);
2396                                 continue;
2397                         }
2398
2399                         ret = (*writepage)(page, wbc, data);
2400
2401                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2402                                 unlock_page(page);
2403                                 ret = 0;
2404                         }
2405                         if (ret || (--(wbc->nr_to_write) <= 0))
2406                                 done = 1;
2407                         if (wbc->nonblocking && bdi_write_congested(bdi)) {
2408                                 wbc->encountered_congestion = 1;
2409                                 done = 1;
2410                         }
2411                 }
2412                 pagevec_release(&pvec);
2413                 cond_resched();
2414         }
2415         if (!scanned && !done) {
2416                 /*
2417                  * We hit the last page and there is more work to be done: wrap
2418                  * back to the start of the file
2419                  */
2420                 scanned = 1;
2421                 index = 0;
2422                 goto retry;
2423         }
2424         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2425                 mapping->writeback_index = index;
2426
2427         if (wbc->range_cont)
2428                 wbc->range_start = index << PAGE_CACHE_SHIFT;
2429         return ret;
2430 }
2431 EXPORT_SYMBOL(extent_write_cache_pages);
2432
2433 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2434                           get_extent_t *get_extent,
2435                           struct writeback_control *wbc)
2436 {
2437         int ret;
2438         struct address_space *mapping = page->mapping;
2439         struct extent_page_data epd = {
2440                 .bio = NULL,
2441                 .tree = tree,
2442                 .get_extent = get_extent,
2443         };
2444         struct writeback_control wbc_writepages = {
2445                 .bdi            = wbc->bdi,
2446                 .sync_mode      = WB_SYNC_NONE,
2447                 .older_than_this = NULL,
2448                 .nr_to_write    = 64,
2449                 .range_start    = page_offset(page) + PAGE_CACHE_SIZE,
2450                 .range_end      = (loff_t)-1,
2451         };
2452
2453
2454         ret = __extent_writepage(page, wbc, &epd);
2455
2456         extent_write_cache_pages(tree, mapping, &wbc_writepages,
2457                                  __extent_writepage, &epd);
2458         if (epd.bio) {
2459                 submit_one_bio(WRITE, epd.bio, 0, 0);
2460         }
2461         return ret;
2462 }
2463 EXPORT_SYMBOL(extent_write_full_page);
2464
2465
2466 int extent_writepages(struct extent_io_tree *tree,
2467                       struct address_space *mapping,
2468                       get_extent_t *get_extent,
2469                       struct writeback_control *wbc)
2470 {
2471         int ret = 0;
2472         struct extent_page_data epd = {
2473                 .bio = NULL,
2474                 .tree = tree,
2475                 .get_extent = get_extent,
2476         };
2477
2478         ret = extent_write_cache_pages(tree, mapping, wbc,
2479                                        __extent_writepage, &epd);
2480         if (epd.bio) {
2481                 submit_one_bio(WRITE, epd.bio, 0, 0);
2482         }
2483         return ret;
2484 }
2485 EXPORT_SYMBOL(extent_writepages);
2486
2487 int extent_readpages(struct extent_io_tree *tree,
2488                      struct address_space *mapping,
2489                      struct list_head *pages, unsigned nr_pages,
2490                      get_extent_t get_extent)
2491 {
2492         struct bio *bio = NULL;
2493         unsigned page_idx;
2494         struct pagevec pvec;
2495         unsigned long bio_flags = 0;
2496
2497         pagevec_init(&pvec, 0);
2498         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2499                 struct page *page = list_entry(pages->prev, struct page, lru);
2500
2501                 prefetchw(&page->flags);
2502                 list_del(&page->lru);
2503                 /*
2504                  * what we want to do here is call add_to_page_cache_lru,
2505                  * but that isn't exported, so we reproduce it here
2506                  */
2507                 if (!add_to_page_cache(page, mapping,
2508                                         page->index, GFP_KERNEL)) {
2509
2510                         /* open coding of lru_cache_add, also not exported */
2511                         page_cache_get(page);
2512                         if (!pagevec_add(&pvec, page))
2513                                 __pagevec_lru_add(&pvec);
2514                         __extent_read_full_page(tree, page, get_extent,
2515                                                 &bio, 0, &bio_flags);
2516                 }
2517                 page_cache_release(page);
2518         }
2519         if (pagevec_count(&pvec))
2520                 __pagevec_lru_add(&pvec);
2521         BUG_ON(!list_empty(pages));
2522         if (bio)
2523                 submit_one_bio(READ, bio, 0, bio_flags);
2524         return 0;
2525 }
2526 EXPORT_SYMBOL(extent_readpages);
2527
2528 /*
2529  * basic invalidatepage code, this waits on any locked or writeback
2530  * ranges corresponding to the page, and then deletes any extent state
2531  * records from the tree
2532  */
2533 int extent_invalidatepage(struct extent_io_tree *tree,
2534                           struct page *page, unsigned long offset)
2535 {
2536         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2537         u64 end = start + PAGE_CACHE_SIZE - 1;
2538         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2539
2540         start += (offset + blocksize -1) & ~(blocksize - 1);
2541         if (start > end)
2542                 return 0;
2543
2544         lock_extent(tree, start, end, GFP_NOFS);
2545         wait_on_extent_writeback(tree, start, end);
2546         clear_extent_bit(tree, start, end,
2547                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2548                          1, 1, GFP_NOFS);
2549         return 0;
2550 }
2551 EXPORT_SYMBOL(extent_invalidatepage);
2552
2553 /*
2554  * simple commit_write call, set_range_dirty is used to mark both
2555  * the pages and the extent records as dirty
2556  */
2557 int extent_commit_write(struct extent_io_tree *tree,
2558                         struct inode *inode, struct page *page,
2559                         unsigned from, unsigned to)
2560 {
2561         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2562
2563         set_page_extent_mapped(page);
2564         set_page_dirty(page);
2565
2566         if (pos > inode->i_size) {
2567                 i_size_write(inode, pos);
2568                 mark_inode_dirty(inode);
2569         }
2570         return 0;
2571 }
2572 EXPORT_SYMBOL(extent_commit_write);
2573
2574 int extent_prepare_write(struct extent_io_tree *tree,
2575                          struct inode *inode, struct page *page,
2576                          unsigned from, unsigned to, get_extent_t *get_extent)
2577 {
2578         u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2579         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2580         u64 block_start;
2581         u64 orig_block_start;
2582         u64 block_end;
2583         u64 cur_end;
2584         struct extent_map *em;
2585         unsigned blocksize = 1 << inode->i_blkbits;
2586         size_t page_offset = 0;
2587         size_t block_off_start;
2588         size_t block_off_end;
2589         int err = 0;
2590         int iocount = 0;
2591         int ret = 0;
2592         int isnew;
2593
2594         set_page_extent_mapped(page);
2595
2596         block_start = (page_start + from) & ~((u64)blocksize - 1);
2597         block_end = (page_start + to - 1) | (blocksize - 1);
2598         orig_block_start = block_start;
2599
2600         lock_extent(tree, page_start, page_end, GFP_NOFS);
2601         while(block_start <= block_end) {
2602                 em = get_extent(inode, page, page_offset, block_start,
2603                                 block_end - block_start + 1, 1);
2604                 if (IS_ERR(em) || !em) {
2605                         goto err;
2606                 }
2607                 cur_end = min(block_end, extent_map_end(em) - 1);
2608                 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2609                 block_off_end = block_off_start + blocksize;
2610                 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2611
2612                 if (!PageUptodate(page) && isnew &&
2613                     (block_off_end > to || block_off_start < from)) {
2614                         void *kaddr;
2615
2616                         kaddr = kmap_atomic(page, KM_USER0);
2617                         if (block_off_end > to)
2618                                 memset(kaddr + to, 0, block_off_end - to);
2619                         if (block_off_start < from)
2620                                 memset(kaddr + block_off_start, 0,
2621                                        from - block_off_start);
2622                         flush_dcache_page(page);
2623                         kunmap_atomic(kaddr, KM_USER0);
2624                 }
2625                 if ((em->block_start != EXTENT_MAP_HOLE &&
2626                      em->block_start != EXTENT_MAP_INLINE) &&
2627                     !isnew && !PageUptodate(page) &&
2628                     (block_off_end > to || block_off_start < from) &&
2629                     !test_range_bit(tree, block_start, cur_end,
2630                                     EXTENT_UPTODATE, 1)) {
2631                         u64 sector;
2632                         u64 extent_offset = block_start - em->start;
2633                         size_t iosize;
2634                         sector = (em->block_start + extent_offset) >> 9;
2635                         iosize = (cur_end - block_start + blocksize) &
2636                                 ~((u64)blocksize - 1);
2637                         /*
2638                          * we've already got the extent locked, but we
2639                          * need to split the state such that our end_bio
2640                          * handler can clear the lock.
2641                          */
2642                         set_extent_bit(tree, block_start,
2643                                        block_start + iosize - 1,
2644                                        EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2645                         ret = submit_extent_page(READ, tree, page,
2646                                          sector, iosize, page_offset, em->bdev,
2647                                          NULL, 1,
2648                                          end_bio_extent_preparewrite, 0,
2649                                          0, 0);
2650                         iocount++;
2651                         block_start = block_start + iosize;
2652                 } else {
2653                         set_extent_uptodate(tree, block_start, cur_end,
2654                                             GFP_NOFS);
2655                         unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2656                         block_start = cur_end + 1;
2657                 }
2658                 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2659                 free_extent_map(em);
2660         }
2661         if (iocount) {
2662                 wait_extent_bit(tree, orig_block_start,
2663                                 block_end, EXTENT_LOCKED);
2664         }
2665         check_page_uptodate(tree, page);
2666 err:
2667         /* FIXME, zero out newly allocated blocks on error */
2668         return err;
2669 }
2670 EXPORT_SYMBOL(extent_prepare_write);
2671
2672 /*
2673  * a helper for releasepage, this tests for areas of the page that
2674  * are locked or under IO and drops the related state bits if it is safe
2675  * to drop the page.
2676  */
2677 int try_release_extent_state(struct extent_map_tree *map,
2678                              struct extent_io_tree *tree, struct page *page,
2679                              gfp_t mask)
2680 {
2681         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2682         u64 end = start + PAGE_CACHE_SIZE - 1;
2683         int ret = 1;
2684
2685         if (test_range_bit(tree, start, end,
2686                            EXTENT_IOBITS | EXTENT_ORDERED, 0))
2687                 ret = 0;
2688         else {
2689                 if ((mask & GFP_NOFS) == GFP_NOFS)
2690                         mask = GFP_NOFS;
2691                 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2692                                  1, 1, mask);
2693         }
2694         return ret;
2695 }
2696 EXPORT_SYMBOL(try_release_extent_state);
2697
2698 /*
2699  * a helper for releasepage.  As long as there are no locked extents
2700  * in the range corresponding to the page, both state records and extent
2701  * map records are removed
2702  */
2703 int try_release_extent_mapping(struct extent_map_tree *map,
2704                                struct extent_io_tree *tree, struct page *page,
2705                                gfp_t mask)
2706 {
2707         struct extent_map *em;
2708         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2709         u64 end = start + PAGE_CACHE_SIZE - 1;
2710
2711         if ((mask & __GFP_WAIT) &&
2712             page->mapping->host->i_size > 16 * 1024 * 1024) {
2713                 u64 len;
2714                 while (start <= end) {
2715                         len = end - start + 1;
2716                         spin_lock(&map->lock);
2717                         em = lookup_extent_mapping(map, start, len);
2718                         if (!em || IS_ERR(em)) {
2719                                 spin_unlock(&map->lock);
2720                                 break;
2721                         }
2722                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2723                             em->start != start) {
2724                                 spin_unlock(&map->lock);
2725                                 free_extent_map(em);
2726                                 break;
2727                         }
2728                         if (!test_range_bit(tree, em->start,
2729                                             extent_map_end(em) - 1,
2730                                             EXTENT_LOCKED | EXTENT_WRITEBACK |
2731                                             EXTENT_ORDERED,
2732                                             0)) {
2733                                 remove_extent_mapping(map, em);
2734                                 /* once for the rb tree */
2735                                 free_extent_map(em);
2736                         }
2737                         start = extent_map_end(em);
2738                         spin_unlock(&map->lock);
2739
2740                         /* once for us */
2741                         free_extent_map(em);
2742                 }
2743         }
2744         return try_release_extent_state(map, tree, page, mask);
2745 }
2746 EXPORT_SYMBOL(try_release_extent_mapping);
2747
2748 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2749                 get_extent_t *get_extent)
2750 {
2751         struct inode *inode = mapping->host;
2752         u64 start = iblock << inode->i_blkbits;
2753         sector_t sector = 0;
2754         struct extent_map *em;
2755
2756         em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2757         if (!em || IS_ERR(em))
2758                 return 0;
2759
2760         if (em->block_start == EXTENT_MAP_INLINE ||
2761             em->block_start == EXTENT_MAP_HOLE)
2762                 goto out;
2763
2764         sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2765 out:
2766         free_extent_map(em);
2767         return sector;
2768 }
2769
2770 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2771                                               unsigned long i)
2772 {
2773         struct page *p;
2774         struct address_space *mapping;
2775
2776         if (i == 0)
2777                 return eb->first_page;
2778         i += eb->start >> PAGE_CACHE_SHIFT;
2779         mapping = eb->first_page->mapping;
2780         if (!mapping)
2781                 return NULL;
2782
2783         /*
2784          * extent_buffer_page is only called after pinning the page
2785          * by increasing the reference count.  So we know the page must
2786          * be in the radix tree.
2787          */
2788         rcu_read_lock();
2789         p = radix_tree_lookup(&mapping->page_tree, i);
2790         rcu_read_unlock();
2791
2792         return p;
2793 }
2794
2795 static inline unsigned long num_extent_pages(u64 start, u64 len)
2796 {
2797         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2798                 (start >> PAGE_CACHE_SHIFT);
2799 }
2800
2801 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2802                                                    u64 start,
2803                                                    unsigned long len,
2804                                                    gfp_t mask)
2805 {
2806         struct extent_buffer *eb = NULL;
2807 #ifdef LEAK_DEBUG
2808         unsigned long flags;
2809 #endif
2810
2811         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2812         eb->start = start;
2813         eb->len = len;
2814         mutex_init(&eb->mutex);
2815 #ifdef LEAK_DEBUG
2816         spin_lock_irqsave(&leak_lock, flags);
2817         list_add(&eb->leak_list, &buffers);
2818         spin_unlock_irqrestore(&leak_lock, flags);
2819 #endif
2820         atomic_set(&eb->refs, 1);
2821
2822         return eb;
2823 }
2824
2825 static void __free_extent_buffer(struct extent_buffer *eb)
2826 {
2827 #ifdef LEAK_DEBUG
2828         unsigned long flags;
2829         spin_lock_irqsave(&leak_lock, flags);
2830         list_del(&eb->leak_list);
2831         spin_unlock_irqrestore(&leak_lock, flags);
2832 #endif
2833         kmem_cache_free(extent_buffer_cache, eb);
2834 }
2835
2836 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2837                                           u64 start, unsigned long len,
2838                                           struct page *page0,
2839                                           gfp_t mask)
2840 {
2841         unsigned long num_pages = num_extent_pages(start, len);
2842         unsigned long i;
2843         unsigned long index = start >> PAGE_CACHE_SHIFT;
2844         struct extent_buffer *eb;
2845         struct extent_buffer *exists = NULL;
2846         struct page *p;
2847         struct address_space *mapping = tree->mapping;
2848         int uptodate = 1;
2849
2850         spin_lock(&tree->buffer_lock);
2851         eb = buffer_search(tree, start);
2852         if (eb) {
2853                 atomic_inc(&eb->refs);
2854                 spin_unlock(&tree->buffer_lock);
2855                 mark_page_accessed(eb->first_page);
2856                 return eb;
2857         }
2858         spin_unlock(&tree->buffer_lock);
2859
2860         eb = __alloc_extent_buffer(tree, start, len, mask);
2861         if (!eb)
2862                 return NULL;
2863
2864         if (page0) {
2865                 eb->first_page = page0;
2866                 i = 1;
2867                 index++;
2868                 page_cache_get(page0);
2869                 mark_page_accessed(page0);
2870                 set_page_extent_mapped(page0);
2871                 set_page_extent_head(page0, len);
2872                 uptodate = PageUptodate(page0);
2873         } else {
2874                 i = 0;
2875         }
2876         for (; i < num_pages; i++, index++) {
2877                 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2878                 if (!p) {
2879                         WARN_ON(1);
2880                         goto free_eb;
2881                 }
2882                 set_page_extent_mapped(p);
2883                 mark_page_accessed(p);
2884                 if (i == 0) {
2885                         eb->first_page = p;
2886                         set_page_extent_head(p, len);
2887                 } else {
2888                         set_page_private(p, EXTENT_PAGE_PRIVATE);
2889                 }
2890                 if (!PageUptodate(p))
2891                         uptodate = 0;
2892                 unlock_page(p);
2893         }
2894         if (uptodate)
2895                 eb->flags |= EXTENT_UPTODATE;
2896         eb->flags |= EXTENT_BUFFER_FILLED;
2897
2898         spin_lock(&tree->buffer_lock);
2899         exists = buffer_tree_insert(tree, start, &eb->rb_node);
2900         if (exists) {
2901                 /* add one reference for the caller */
2902                 atomic_inc(&exists->refs);
2903                 spin_unlock(&tree->buffer_lock);
2904                 goto free_eb;
2905         }
2906         spin_unlock(&tree->buffer_lock);
2907
2908         /* add one reference for the tree */
2909         atomic_inc(&eb->refs);
2910         return eb;
2911
2912 free_eb:
2913         if (!atomic_dec_and_test(&eb->refs))
2914                 return exists;
2915         for (index = 1; index < i; index++)
2916                 page_cache_release(extent_buffer_page(eb, index));
2917         page_cache_release(extent_buffer_page(eb, 0));
2918         __free_extent_buffer(eb);
2919         return exists;
2920 }
2921 EXPORT_SYMBOL(alloc_extent_buffer);
2922
2923 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
2924                                          u64 start, unsigned long len,
2925                                           gfp_t mask)
2926 {
2927         struct extent_buffer *eb;
2928
2929         spin_lock(&tree->buffer_lock);
2930         eb = buffer_search(tree, start);
2931         if (eb)
2932                 atomic_inc(&eb->refs);
2933         spin_unlock(&tree->buffer_lock);
2934
2935         if (eb)
2936                 mark_page_accessed(eb->first_page);
2937
2938         return eb;
2939 }
2940 EXPORT_SYMBOL(find_extent_buffer);
2941
2942 void free_extent_buffer(struct extent_buffer *eb)
2943 {
2944         if (!eb)
2945                 return;
2946
2947         if (!atomic_dec_and_test(&eb->refs))
2948                 return;
2949
2950         WARN_ON(1);
2951 }
2952 EXPORT_SYMBOL(free_extent_buffer);
2953
2954 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
2955                               struct extent_buffer *eb)
2956 {
2957         int set;
2958         unsigned long i;
2959         unsigned long num_pages;
2960         struct page *page;
2961
2962         u64 start = eb->start;
2963         u64 end = start + eb->len - 1;
2964
2965         set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2966         num_pages = num_extent_pages(eb->start, eb->len);
2967
2968         for (i = 0; i < num_pages; i++) {
2969                 page = extent_buffer_page(eb, i);
2970                 lock_page(page);
2971                 if (i == 0)
2972                         set_page_extent_head(page, eb->len);
2973                 else
2974                         set_page_private(page, EXTENT_PAGE_PRIVATE);
2975
2976                 /*
2977                  * if we're on the last page or the first page and the
2978                  * block isn't aligned on a page boundary, do extra checks
2979                  * to make sure we don't clean page that is partially dirty
2980                  */
2981                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2982                     ((i == num_pages - 1) &&
2983                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2984                         start = (u64)page->index << PAGE_CACHE_SHIFT;
2985                         end  = start + PAGE_CACHE_SIZE - 1;
2986                         if (test_range_bit(tree, start, end,
2987                                            EXTENT_DIRTY, 0)) {
2988                                 unlock_page(page);
2989                                 continue;
2990                         }
2991                 }
2992                 clear_page_dirty_for_io(page);
2993                 spin_lock_irq(&page->mapping->tree_lock);
2994                 if (!PageDirty(page)) {
2995                         radix_tree_tag_clear(&page->mapping->page_tree,
2996                                                 page_index(page),
2997                                                 PAGECACHE_TAG_DIRTY);
2998                 }
2999                 spin_unlock_irq(&page->mapping->tree_lock);
3000                 unlock_page(page);
3001         }
3002         return 0;
3003 }
3004 EXPORT_SYMBOL(clear_extent_buffer_dirty);
3005
3006 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3007                                     struct extent_buffer *eb)
3008 {
3009         return wait_on_extent_writeback(tree, eb->start,
3010                                         eb->start + eb->len - 1);
3011 }
3012 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
3013
3014 int set_extent_buffer_dirty(struct extent_io_tree *tree,
3015                              struct extent_buffer *eb)
3016 {
3017         unsigned long i;
3018         unsigned long num_pages;
3019
3020         num_pages = num_extent_pages(eb->start, eb->len);
3021         for (i = 0; i < num_pages; i++) {
3022                 struct page *page = extent_buffer_page(eb, i);
3023                 /* writepage may need to do something special for the
3024                  * first page, we have to make sure page->private is
3025                  * properly set.  releasepage may drop page->private
3026                  * on us if the page isn't already dirty.
3027                  */
3028                 lock_page(page);
3029                 if (i == 0) {
3030                         set_page_extent_head(page, eb->len);
3031                 } else if (PagePrivate(page) &&
3032                            page->private != EXTENT_PAGE_PRIVATE) {
3033                         set_page_extent_mapped(page);
3034                 }
3035                 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3036                 set_extent_dirty(tree, page_offset(page),
3037                                  page_offset(page) + PAGE_CACHE_SIZE -1,
3038                                  GFP_NOFS);
3039                 unlock_page(page);
3040         }
3041         return 0;
3042 }
3043 EXPORT_SYMBOL(set_extent_buffer_dirty);
3044
3045 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3046                                 struct extent_buffer *eb)
3047 {
3048         unsigned long i;
3049         struct page *page;
3050         unsigned long num_pages;
3051
3052         num_pages = num_extent_pages(eb->start, eb->len);
3053         eb->flags &= ~EXTENT_UPTODATE;
3054
3055         clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3056                               GFP_NOFS);
3057         for (i = 0; i < num_pages; i++) {
3058                 page = extent_buffer_page(eb, i);
3059                 if (page)
3060                         ClearPageUptodate(page);
3061         }
3062         return 0;
3063 }
3064
3065 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3066                                 struct extent_buffer *eb)
3067 {
3068         unsigned long i;
3069         struct page *page;
3070         unsigned long num_pages;
3071
3072         num_pages = num_extent_pages(eb->start, eb->len);
3073
3074         set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3075                             GFP_NOFS);
3076         for (i = 0; i < num_pages; i++) {
3077                 page = extent_buffer_page(eb, i);
3078                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3079                     ((i == num_pages - 1) &&
3080                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3081                         check_page_uptodate(tree, page);
3082                         continue;
3083                 }
3084                 SetPageUptodate(page);
3085         }
3086         return 0;
3087 }
3088 EXPORT_SYMBOL(set_extent_buffer_uptodate);
3089
3090 int extent_range_uptodate(struct extent_io_tree *tree,
3091                           u64 start, u64 end)
3092 {
3093         struct page *page;
3094         int ret;
3095         int pg_uptodate = 1;
3096         int uptodate;
3097         unsigned long index;
3098
3099         ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
3100         if (ret)
3101                 return 1;
3102         while(start <= end) {
3103                 index = start >> PAGE_CACHE_SHIFT;
3104                 page = find_get_page(tree->mapping, index);
3105                 uptodate = PageUptodate(page);
3106                 page_cache_release(page);
3107                 if (!uptodate) {
3108                         pg_uptodate = 0;
3109                         break;
3110                 }
3111                 start += PAGE_CACHE_SIZE;
3112         }
3113         return pg_uptodate;
3114 }
3115
3116 int extent_buffer_uptodate(struct extent_io_tree *tree,
3117                            struct extent_buffer *eb)
3118 {
3119         int ret = 0;
3120         unsigned long num_pages;
3121         unsigned long i;
3122         struct page *page;
3123         int pg_uptodate = 1;
3124
3125         if (eb->flags & EXTENT_UPTODATE)
3126                 return 1;
3127
3128         ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3129                            EXTENT_UPTODATE, 1);
3130         if (ret)
3131                 return ret;
3132
3133         num_pages = num_extent_pages(eb->start, eb->len);
3134         for (i = 0; i < num_pages; i++) {
3135                 page = extent_buffer_page(eb, i);
3136                 if (!PageUptodate(page)) {
3137                         pg_uptodate = 0;
3138                         break;
3139                 }
3140         }
3141         return pg_uptodate;
3142 }
3143 EXPORT_SYMBOL(extent_buffer_uptodate);
3144
3145 int read_extent_buffer_pages(struct extent_io_tree *tree,
3146                              struct extent_buffer *eb,
3147                              u64 start, int wait,
3148                              get_extent_t *get_extent, int mirror_num)
3149 {
3150         unsigned long i;
3151         unsigned long start_i;
3152         struct page *page;
3153         int err;
3154         int ret = 0;
3155         int locked_pages = 0;
3156         int all_uptodate = 1;
3157         int inc_all_pages = 0;
3158         unsigned long num_pages;
3159         struct bio *bio = NULL;
3160         unsigned long bio_flags = 0;
3161
3162         if (eb->flags & EXTENT_UPTODATE)
3163                 return 0;
3164
3165         if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3166                            EXTENT_UPTODATE, 1)) {
3167                 return 0;
3168         }
3169
3170         if (start) {
3171                 WARN_ON(start < eb->start);
3172                 start_i = (start >> PAGE_CACHE_SHIFT) -
3173                         (eb->start >> PAGE_CACHE_SHIFT);
3174         } else {
3175                 start_i = 0;
3176         }
3177
3178         num_pages = num_extent_pages(eb->start, eb->len);
3179         for (i = start_i; i < num_pages; i++) {
3180                 page = extent_buffer_page(eb, i);
3181                 if (!wait) {
3182                         if (!trylock_page(page))
3183                                 goto unlock_exit;
3184                 } else {
3185                         lock_page(page);
3186                 }
3187                 locked_pages++;
3188                 if (!PageUptodate(page)) {
3189                         all_uptodate = 0;
3190                 }
3191         }
3192         if (all_uptodate) {
3193                 if (start_i == 0)
3194                         eb->flags |= EXTENT_UPTODATE;
3195                 if (ret) {
3196                         printk("all up to date but ret is %d\n", ret);
3197                 }
3198                 goto unlock_exit;
3199         }
3200
3201         for (i = start_i; i < num_pages; i++) {
3202                 page = extent_buffer_page(eb, i);
3203                 if (inc_all_pages)
3204                         page_cache_get(page);
3205                 if (!PageUptodate(page)) {
3206                         if (start_i == 0)
3207                                 inc_all_pages = 1;
3208                         ClearPageError(page);
3209                         err = __extent_read_full_page(tree, page,
3210                                                       get_extent, &bio,
3211                                                       mirror_num, &bio_flags);
3212                         if (err) {
3213                                 ret = err;
3214                                 printk("err %d from __extent_read_full_page\n", ret);
3215                         }
3216                 } else {
3217                         unlock_page(page);
3218                 }
3219         }
3220
3221         if (bio)
3222                 submit_one_bio(READ, bio, mirror_num, bio_flags);
3223
3224         if (ret || !wait) {
3225                 if (ret)
3226                         printk("ret %d wait %d returning\n", ret, wait);
3227                 return ret;
3228         }
3229         for (i = start_i; i < num_pages; i++) {
3230                 page = extent_buffer_page(eb, i);
3231                 wait_on_page_locked(page);
3232                 if (!PageUptodate(page)) {
3233                         printk("page not uptodate after wait_on_page_locked\n");
3234                         ret = -EIO;
3235                 }
3236         }
3237         if (!ret)
3238                 eb->flags |= EXTENT_UPTODATE;
3239         return ret;
3240
3241 unlock_exit:
3242         i = start_i;
3243         while(locked_pages > 0) {
3244                 page = extent_buffer_page(eb, i);
3245                 i++;
3246                 unlock_page(page);
3247                 locked_pages--;
3248         }
3249         return ret;
3250 }
3251 EXPORT_SYMBOL(read_extent_buffer_pages);
3252
3253 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3254                         unsigned long start,
3255                         unsigned long len)
3256 {
3257         size_t cur;
3258         size_t offset;
3259         struct page *page;
3260         char *kaddr;
3261         char *dst = (char *)dstv;
3262         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3263         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3264
3265         WARN_ON(start > eb->len);
3266         WARN_ON(start + len > eb->start + eb->len);
3267
3268         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3269
3270         while(len > 0) {
3271                 page = extent_buffer_page(eb, i);
3272
3273                 cur = min(len, (PAGE_CACHE_SIZE - offset));
3274                 kaddr = kmap_atomic(page, KM_USER1);
3275                 memcpy(dst, kaddr + offset, cur);
3276                 kunmap_atomic(kaddr, KM_USER1);
3277
3278                 dst += cur;
3279                 len -= cur;
3280                 offset = 0;
3281                 i++;
3282         }
3283 }
3284 EXPORT_SYMBOL(read_extent_buffer);
3285
3286 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3287                                unsigned long min_len, char **token, char **map,
3288                                unsigned long *map_start,
3289                                unsigned long *map_len, int km)
3290 {
3291         size_t offset = start & (PAGE_CACHE_SIZE - 1);
3292         char *kaddr;
3293         struct page *p;
3294         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3295         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3296         unsigned long end_i = (start_offset + start + min_len - 1) >>
3297                 PAGE_CACHE_SHIFT;
3298
3299         if (i != end_i)
3300                 return -EINVAL;
3301
3302         if (i == 0) {
3303                 offset = start_offset;
3304                 *map_start = 0;
3305         } else {
3306                 offset = 0;
3307                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3308         }
3309         if (start + min_len > eb->len) {
3310 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
3311                 WARN_ON(1);
3312         }
3313
3314         p = extent_buffer_page(eb, i);
3315         kaddr = kmap_atomic(p, km);
3316         *token = kaddr;
3317         *map = kaddr + offset;
3318         *map_len = PAGE_CACHE_SIZE - offset;
3319         return 0;
3320 }
3321 EXPORT_SYMBOL(map_private_extent_buffer);
3322
3323 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3324                       unsigned long min_len,
3325                       char **token, char **map,
3326                       unsigned long *map_start,
3327                       unsigned long *map_len, int km)
3328 {
3329         int err;
3330         int save = 0;
3331         if (eb->map_token) {
3332                 unmap_extent_buffer(eb, eb->map_token, km);
3333                 eb->map_token = NULL;
3334                 save = 1;
3335         }
3336         err = map_private_extent_buffer(eb, start, min_len, token, map,
3337                                        map_start, map_len, km);
3338         if (!err && save) {
3339                 eb->map_token = *token;
3340                 eb->kaddr = *map;
3341                 eb->map_start = *map_start;
3342                 eb->map_len = *map_len;
3343         }
3344         return err;
3345 }
3346 EXPORT_SYMBOL(map_extent_buffer);
3347
3348 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3349 {
3350         kunmap_atomic(token, km);
3351 }
3352 EXPORT_SYMBOL(unmap_extent_buffer);
3353
3354 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3355                           unsigned long start,
3356                           unsigned long len)
3357 {
3358         size_t cur;
3359         size_t offset;
3360         struct page *page;
3361         char *kaddr;
3362         char *ptr = (char *)ptrv;
3363         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3364         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3365         int ret = 0;
3366
3367         WARN_ON(start > eb->len);
3368         WARN_ON(start + len > eb->start + eb->len);
3369
3370         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3371
3372         while(len > 0) {
3373                 page = extent_buffer_page(eb, i);
3374
3375                 cur = min(len, (PAGE_CACHE_SIZE - offset));
3376
3377                 kaddr = kmap_atomic(page, KM_USER0);
3378                 ret = memcmp(ptr, kaddr + offset, cur);
3379                 kunmap_atomic(kaddr, KM_USER0);
3380                 if (ret)
3381                         break;
3382
3383                 ptr += cur;
3384                 len -= cur;
3385                 offset = 0;
3386                 i++;
3387         }
3388         return ret;
3389 }
3390 EXPORT_SYMBOL(memcmp_extent_buffer);
3391
3392 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3393                          unsigned long start, unsigned long len)
3394 {
3395         size_t cur;
3396         size_t offset;
3397         struct page *page;
3398         char *kaddr;
3399         char *src = (char *)srcv;
3400         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3401         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3402
3403         WARN_ON(start > eb->len);
3404         WARN_ON(start + len > eb->start + eb->len);
3405
3406         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3407
3408         while(len > 0) {
3409                 page = extent_buffer_page(eb, i);
3410                 WARN_ON(!PageUptodate(page));
3411
3412                 cur = min(len, PAGE_CACHE_SIZE - offset);
3413                 kaddr = kmap_atomic(page, KM_USER1);
3414                 memcpy(kaddr + offset, src, cur);
3415                 kunmap_atomic(kaddr, KM_USER1);
3416
3417                 src += cur;
3418                 len -= cur;
3419                 offset = 0;
3420                 i++;
3421         }
3422 }
3423 EXPORT_SYMBOL(write_extent_buffer);
3424
3425 void memset_extent_buffer(struct extent_buffer *eb, char c,
3426                           unsigned long start, unsigned long len)
3427 {
3428         size_t cur;
3429         size_t offset;
3430         struct page *page;
3431         char *kaddr;
3432         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3433         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3434
3435         WARN_ON(start > eb->len);
3436         WARN_ON(start + len > eb->start + eb->len);
3437
3438         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3439
3440         while(len > 0) {
3441                 page = extent_buffer_page(eb, i);
3442                 WARN_ON(!PageUptodate(page));
3443
3444                 cur = min(len, PAGE_CACHE_SIZE - offset);
3445                 kaddr = kmap_atomic(page, KM_USER0);
3446                 memset(kaddr + offset, c, cur);
3447                 kunmap_atomic(kaddr, KM_USER0);
3448
3449                 len -= cur;
3450                 offset = 0;
3451                 i++;
3452         }
3453 }
3454 EXPORT_SYMBOL(memset_extent_buffer);
3455
3456 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3457                         unsigned long dst_offset, unsigned long src_offset,
3458                         unsigned long len)
3459 {
3460         u64 dst_len = dst->len;
3461         size_t cur;
3462         size_t offset;
3463         struct page *page;
3464         char *kaddr;
3465         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3466         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3467
3468         WARN_ON(src->len != dst_len);
3469
3470         offset = (start_offset + dst_offset) &
3471                 ((unsigned long)PAGE_CACHE_SIZE - 1);
3472
3473         while(len > 0) {
3474                 page = extent_buffer_page(dst, i);
3475                 WARN_ON(!PageUptodate(page));
3476
3477                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3478
3479                 kaddr = kmap_atomic(page, KM_USER0);
3480                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3481                 kunmap_atomic(kaddr, KM_USER0);
3482
3483                 src_offset += cur;
3484                 len -= cur;
3485                 offset = 0;
3486                 i++;
3487         }
3488 }
3489 EXPORT_SYMBOL(copy_extent_buffer);
3490
3491 static void move_pages(struct page *dst_page, struct page *src_page,
3492                        unsigned long dst_off, unsigned long src_off,
3493                        unsigned long len)
3494 {
3495         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3496         if (dst_page == src_page) {
3497                 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3498         } else {
3499                 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3500                 char *p = dst_kaddr + dst_off + len;
3501                 char *s = src_kaddr + src_off + len;
3502
3503                 while (len--)
3504                         *--p = *--s;
3505
3506                 kunmap_atomic(src_kaddr, KM_USER1);
3507         }
3508         kunmap_atomic(dst_kaddr, KM_USER0);
3509 }
3510
3511 static void copy_pages(struct page *dst_page, struct page *src_page,
3512                        unsigned long dst_off, unsigned long src_off,
3513                        unsigned long len)
3514 {
3515         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3516         char *src_kaddr;
3517
3518         if (dst_page != src_page)
3519                 src_kaddr = kmap_atomic(src_page, KM_USER1);
3520         else
3521                 src_kaddr = dst_kaddr;
3522
3523         memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3524         kunmap_atomic(dst_kaddr, KM_USER0);
3525         if (dst_page != src_page)
3526                 kunmap_atomic(src_kaddr, KM_USER1);
3527 }
3528
3529 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3530                            unsigned long src_offset, unsigned long len)
3531 {
3532         size_t cur;
3533         size_t dst_off_in_page;
3534         size_t src_off_in_page;
3535         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3536         unsigned long dst_i;
3537         unsigned long src_i;
3538
3539         if (src_offset + len > dst->len) {
3540                 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3541                        src_offset, len, dst->len);
3542                 BUG_ON(1);
3543         }
3544         if (dst_offset + len > dst->len) {
3545                 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3546                        dst_offset, len, dst->len);
3547                 BUG_ON(1);
3548         }
3549
3550         while(len > 0) {
3551                 dst_off_in_page = (start_offset + dst_offset) &
3552                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3553                 src_off_in_page = (start_offset + src_offset) &
3554                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3555
3556                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3557                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3558
3559                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3560                                                src_off_in_page));
3561                 cur = min_t(unsigned long, cur,
3562                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3563
3564                 copy_pages(extent_buffer_page(dst, dst_i),
3565                            extent_buffer_page(dst, src_i),
3566                            dst_off_in_page, src_off_in_page, cur);
3567
3568                 src_offset += cur;
3569                 dst_offset += cur;
3570                 len -= cur;
3571         }
3572 }
3573 EXPORT_SYMBOL(memcpy_extent_buffer);
3574
3575 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3576                            unsigned long src_offset, unsigned long len)
3577 {
3578         size_t cur;
3579         size_t dst_off_in_page;
3580         size_t src_off_in_page;
3581         unsigned long dst_end = dst_offset + len - 1;
3582         unsigned long src_end = src_offset + len - 1;
3583         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3584         unsigned long dst_i;
3585         unsigned long src_i;
3586
3587         if (src_offset + len > dst->len) {
3588                 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3589                        src_offset, len, dst->len);
3590                 BUG_ON(1);
3591         }
3592         if (dst_offset + len > dst->len) {
3593                 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3594                        dst_offset, len, dst->len);
3595                 BUG_ON(1);
3596         }
3597         if (dst_offset < src_offset) {
3598                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3599                 return;
3600         }
3601         while(len > 0) {
3602                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3603                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3604
3605                 dst_off_in_page = (start_offset + dst_end) &
3606                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3607                 src_off_in_page = (start_offset + src_end) &
3608                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3609
3610                 cur = min_t(unsigned long, len, src_off_in_page + 1);
3611                 cur = min(cur, dst_off_in_page + 1);
3612                 move_pages(extent_buffer_page(dst, dst_i),
3613                            extent_buffer_page(dst, src_i),
3614                            dst_off_in_page - cur + 1,
3615                            src_off_in_page - cur + 1, cur);
3616
3617                 dst_end -= cur;
3618                 src_end -= cur;
3619                 len -= cur;
3620         }
3621 }
3622 EXPORT_SYMBOL(memmove_extent_buffer);
3623
3624 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3625 {
3626         u64 start = page_offset(page);
3627         struct extent_buffer *eb;
3628         int ret = 1;
3629         unsigned long i;
3630         unsigned long num_pages;
3631
3632         spin_lock(&tree->buffer_lock);
3633         eb = buffer_search(tree, start);
3634         if (!eb)
3635                 goto out;
3636
3637         if (atomic_read(&eb->refs) > 1) {
3638                 ret = 0;
3639                 goto out;
3640         }
3641         /* at this point we can safely release the extent buffer */
3642         num_pages = num_extent_pages(eb->start, eb->len);
3643         for (i = 0; i < num_pages; i++)
3644                 page_cache_release(extent_buffer_page(eb, i));
3645         rb_erase(&eb->rb_node, &tree->buffer);
3646         __free_extent_buffer(eb);
3647 out:
3648         spin_unlock(&tree->buffer_lock);
3649         return ret;
3650 }
3651 EXPORT_SYMBOL(try_release_extent_buffer);