1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/pagemap.h>
6 #include <linux/page-flags.h>
7 #include <linux/spinlock.h>
8 #include <linux/blkdev.h>
9 #include <linux/swap.h>
10 #include <linux/writeback.h>
11 #include <linux/pagevec.h>
12 #include <linux/prefetch.h>
13 #include <linux/cleancache.h>
14 #include "extent_io.h"
15 #include "extent_map.h"
16 #include "ctree.h"
17 #include "btrfs_inode.h"
18 #include "volumes.h"
19 #include "check-integrity.h"
20 #include "locking.h"
21 #include "rcu-string.h"
22 #include "backref.h"
23
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
26 static struct bio_set *btrfs_bioset;
27
28 #ifdef CONFIG_BTRFS_DEBUG
29 static LIST_HEAD(buffers);
30 static LIST_HEAD(states);
31
32 static DEFINE_SPINLOCK(leak_lock);
33
34 static inline
35 void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
36 {
37         unsigned long flags;
38
39         spin_lock_irqsave(&leak_lock, flags);
40         list_add(new, head);
41         spin_unlock_irqrestore(&leak_lock, flags);
42 }
43
44 static inline
45 void btrfs_leak_debug_del(struct list_head *entry)
46 {
47         unsigned long flags;
48
49         spin_lock_irqsave(&leak_lock, flags);
50         list_del(entry);
51         spin_unlock_irqrestore(&leak_lock, flags);
52 }
53
54 static inline
55 void btrfs_leak_debug_check(void)
56 {
57         struct extent_state *state;
58         struct extent_buffer *eb;
59
60         while (!list_empty(&states)) {
61                 state = list_entry(states.next, struct extent_state, leak_list);
62                 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
63                        "state %lu in tree %p refs %d\n",
64                        state->start, state->end, state->state, state->tree,
65                        atomic_read(&state->refs));
66                 list_del(&state->leak_list);
67                 kmem_cache_free(extent_state_cache, state);
68         }
69
70         while (!list_empty(&buffers)) {
71                 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
72                 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
73                        "refs %d\n",
74                        eb->start, eb->len, atomic_read(&eb->refs));
75                 list_del(&eb->leak_list);
76                 kmem_cache_free(extent_buffer_cache, eb);
77         }
78 }
79
80 #define btrfs_debug_check_extent_io_range(tree, start, end)             \
81         __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
82 static inline void __btrfs_debug_check_extent_io_range(const char *caller,
83                 struct extent_io_tree *tree, u64 start, u64 end)
84 {
85         struct inode *inode;
86         u64 isize;
87
88         if (!tree->mapping)
89                 return;
90
91         inode = tree->mapping->host;
92         isize = i_size_read(inode);
93         if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
94                 printk_ratelimited(KERN_DEBUG
95                     "btrfs: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
96                                 caller, btrfs_ino(inode), isize, start, end);
97         }
98 }
99 #else
100 #define btrfs_leak_debug_add(new, head) do {} while (0)
101 #define btrfs_leak_debug_del(entry)     do {} while (0)
102 #define btrfs_leak_debug_check()        do {} while (0)
103 #define btrfs_debug_check_extent_io_range(c, s, e)      do {} while (0)
104 #endif
105
106 #define BUFFER_LRU_MAX 64
107
108 struct tree_entry {
109         u64 start;
110         u64 end;
111         struct rb_node rb_node;
112 };
113
114 struct extent_page_data {
115         struct bio *bio;
116         struct extent_io_tree *tree;
117         get_extent_t *get_extent;
118         unsigned long bio_flags;
119
120         /* tells writepage not to lock the state bits for this range
121          * it still does the unlocking
122          */
123         unsigned int extent_locked:1;
124
125         /* tells the submit_bio code to use a WRITE_SYNC */
126         unsigned int sync_io:1;
127 };
128
129 static noinline void flush_write_bio(void *data);
130 static inline struct btrfs_fs_info *
131 tree_fs_info(struct extent_io_tree *tree)
132 {
133         if (!tree->mapping)
134                 return NULL;
135         return btrfs_sb(tree->mapping->host->i_sb);
136 }
137
138 int __init extent_io_init(void)
139 {
140         extent_state_cache = kmem_cache_create("btrfs_extent_state",
141                         sizeof(struct extent_state), 0,
142                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
143         if (!extent_state_cache)
144                 return -ENOMEM;
145
146         extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
147                         sizeof(struct extent_buffer), 0,
148                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
149         if (!extent_buffer_cache)
150                 goto free_state_cache;
151
152         btrfs_bioset = bioset_create(BIO_POOL_SIZE,
153                                      offsetof(struct btrfs_io_bio, bio));
154         if (!btrfs_bioset)
155                 goto free_buffer_cache;
156
157         if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
158                 goto free_bioset;
159
160         return 0;
161
162 free_bioset:
163         bioset_free(btrfs_bioset);
164         btrfs_bioset = NULL;
165
166 free_buffer_cache:
167         kmem_cache_destroy(extent_buffer_cache);
168         extent_buffer_cache = NULL;
169
170 free_state_cache:
171         kmem_cache_destroy(extent_state_cache);
172         extent_state_cache = NULL;
173         return -ENOMEM;
174 }
175
176 void extent_io_exit(void)
177 {
178         btrfs_leak_debug_check();
179
180         /*
181          * Make sure all delayed RCU frees are flushed before we
182          * destroy the caches.
183          */
184         rcu_barrier();
185         if (extent_state_cache)
186                 kmem_cache_destroy(extent_state_cache);
187         if (extent_buffer_cache)
188                 kmem_cache_destroy(extent_buffer_cache);
189         if (btrfs_bioset)
190                 bioset_free(btrfs_bioset);
191 }
192
193 void extent_io_tree_init(struct extent_io_tree *tree,
194                          struct address_space *mapping)
195 {
196         tree->state = RB_ROOT;
197         INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
198         tree->ops = NULL;
199         tree->dirty_bytes = 0;
200         spin_lock_init(&tree->lock);
201         spin_lock_init(&tree->buffer_lock);
202         tree->mapping = mapping;
203 }
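
/*
 * Illustrative sketch (not part of the upstream file): an extent_io_tree is
 * normally embedded in a larger per-inode structure and initialised against
 * the owning inode's mapping.  The helper name below is hypothetical.
 */
static inline void example_init_io_tree(struct extent_io_tree *tree,
					struct inode *inode)
{
	extent_io_tree_init(tree, inode->i_mapping);
}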
204
205 static struct extent_state *alloc_extent_state(gfp_t mask)
206 {
207         struct extent_state *state;
208
209         state = kmem_cache_alloc(extent_state_cache, mask);
210         if (!state)
211                 return state;
212         state->state = 0;
213         state->private = 0;
214         state->tree = NULL;
215         btrfs_leak_debug_add(&state->leak_list, &states);
216         atomic_set(&state->refs, 1);
217         init_waitqueue_head(&state->wq);
218         trace_alloc_extent_state(state, mask, _RET_IP_);
219         return state;
220 }
221
222 void free_extent_state(struct extent_state *state)
223 {
224         if (!state)
225                 return;
226         if (atomic_dec_and_test(&state->refs)) {
227                 WARN_ON(state->tree);
228                 btrfs_leak_debug_del(&state->leak_list);
229                 trace_free_extent_state(state, _RET_IP_);
230                 kmem_cache_free(extent_state_cache, state);
231         }
232 }
233
234 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
235                                    struct rb_node *node,
236                                    struct rb_node ***p_in,
237                                    struct rb_node **parent_in)
238 {
239         struct rb_node **p = &root->rb_node;
240         struct rb_node *parent = NULL;
241         struct tree_entry *entry;
242
243         if (p_in && parent_in) {
244                 p = *p_in;
245                 parent = *parent_in;
246                 goto do_insert;
247         }
248
249         while (*p) {
250                 parent = *p;
251                 entry = rb_entry(parent, struct tree_entry, rb_node);
252
253                 if (offset < entry->start)
254                         p = &(*p)->rb_left;
255                 else if (offset > entry->end)
256                         p = &(*p)->rb_right;
257                 else
258                         return parent;
259         }
260
261 do_insert:
262         rb_link_node(node, parent, p);
263         rb_insert_color(node, root);
264         return NULL;
265 }
266
267 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
268                                       struct rb_node **prev_ret,
269                                       struct rb_node **next_ret,
270                                       struct rb_node ***p_ret,
271                                       struct rb_node **parent_ret)
272 {
273         struct rb_root *root = &tree->state;
274         struct rb_node **n = &root->rb_node;
275         struct rb_node *prev = NULL;
276         struct rb_node *orig_prev = NULL;
277         struct tree_entry *entry;
278         struct tree_entry *prev_entry = NULL;
279
280         while (*n) {
281                 prev = *n;
282                 entry = rb_entry(prev, struct tree_entry, rb_node);
283                 prev_entry = entry;
284
285                 if (offset < entry->start)
286                         n = &(*n)->rb_left;
287                 else if (offset > entry->end)
288                         n = &(*n)->rb_right;
289                 else
290                         return *n;
291         }
292
293         if (p_ret)
294                 *p_ret = n;
295         if (parent_ret)
296                 *parent_ret = prev;
297
298         if (prev_ret) {
299                 orig_prev = prev;
300                 while (prev && offset > prev_entry->end) {
301                         prev = rb_next(prev);
302                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
303                 }
304                 *prev_ret = prev;
305                 prev = orig_prev;
306         }
307
308         if (next_ret) {
309                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
310                 while (prev && offset < prev_entry->start) {
311                         prev = rb_prev(prev);
312                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
313                 }
314                 *next_ret = prev;
315         }
316         return NULL;
317 }
318
319 static inline struct rb_node *
320 tree_search_for_insert(struct extent_io_tree *tree,
321                        u64 offset,
322                        struct rb_node ***p_ret,
323                        struct rb_node **parent_ret)
324 {
325         struct rb_node *prev = NULL;
326         struct rb_node *ret;
327
328         ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
329         if (!ret)
330                 return prev;
331         return ret;
332 }
333
334 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
335                                           u64 offset)
336 {
337         return tree_search_for_insert(tree, offset, NULL, NULL);
338 }
339
340 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
341                      struct extent_state *other)
342 {
343         if (tree->ops && tree->ops->merge_extent_hook)
344                 tree->ops->merge_extent_hook(tree->mapping->host, new,
345                                              other);
346 }
347
348 /*
349  * utility function to look for merge candidates inside a given range.
350  * Any extents with matching state are merged together into a single
351  * extent in the tree.  Extents with EXTENT_IOBITS set in their state field
352  * are not merged because the end_io handlers need to be able to do
353  * operations on them without sleeping (or doing allocations/splits).
354  *
355  * This should be called with the tree lock held.
356  */
357 static void merge_state(struct extent_io_tree *tree,
358                         struct extent_state *state)
359 {
360         struct extent_state *other;
361         struct rb_node *other_node;
362
363         if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
364                 return;
365
366         other_node = rb_prev(&state->rb_node);
367         if (other_node) {
368                 other = rb_entry(other_node, struct extent_state, rb_node);
369                 if (other->end == state->start - 1 &&
370                     other->state == state->state) {
371                         merge_cb(tree, state, other);
372                         state->start = other->start;
373                         other->tree = NULL;
374                         rb_erase(&other->rb_node, &tree->state);
375                         free_extent_state(other);
376                 }
377         }
378         other_node = rb_next(&state->rb_node);
379         if (other_node) {
380                 other = rb_entry(other_node, struct extent_state, rb_node);
381                 if (other->start == state->end + 1 &&
382                     other->state == state->state) {
383                         merge_cb(tree, state, other);
384                         state->end = other->end;
385                         other->tree = NULL;
386                         rb_erase(&other->rb_node, &tree->state);
387                         free_extent_state(other);
388                 }
389         }
390 }
391
392 static void set_state_cb(struct extent_io_tree *tree,
393                          struct extent_state *state, unsigned long *bits)
394 {
395         if (tree->ops && tree->ops->set_bit_hook)
396                 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
397 }
398
399 static void clear_state_cb(struct extent_io_tree *tree,
400                            struct extent_state *state, unsigned long *bits)
401 {
402         if (tree->ops && tree->ops->clear_bit_hook)
403                 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
404 }
405
406 static void set_state_bits(struct extent_io_tree *tree,
407                            struct extent_state *state, unsigned long *bits);
408
409 /*
410  * insert an extent_state struct into the tree.  'bits' are set on the
411  * struct before it is inserted.
412  *
413  * This may return -EEXIST if the extent is already there, in which case the
414  * state struct is freed.
415  *
416  * The tree lock is not taken internally.  This is a utility function and
417  * probably isn't what you want to call (see set/clear_extent_bit).
418  */
419 static int insert_state(struct extent_io_tree *tree,
420                         struct extent_state *state, u64 start, u64 end,
421                         struct rb_node ***p,
422                         struct rb_node **parent,
423                         unsigned long *bits)
424 {
425         struct rb_node *node;
426
427         if (end < start)
428                 WARN(1, KERN_ERR "btrfs end < start %llu %llu\n",
429                        end, start);
430         state->start = start;
431         state->end = end;
432
433         set_state_bits(tree, state, bits);
434
435         node = tree_insert(&tree->state, end, &state->rb_node, p, parent);
436         if (node) {
437                 struct extent_state *found;
438                 found = rb_entry(node, struct extent_state, rb_node);
439                 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
440                        "%llu %llu\n",
441                        found->start, found->end, start, end);
442                 return -EEXIST;
443         }
444         state->tree = tree;
445         merge_state(tree, state);
446         return 0;
447 }
448
449 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
450                      u64 split)
451 {
452         if (tree->ops && tree->ops->split_extent_hook)
453                 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
454 }
455
456 /*
457  * split a given extent state struct in two, inserting the preallocated
458  * struct 'prealloc' as the newly created second half.  'split' indicates an
459  * offset inside 'orig' where it should be split.
460  *
461  * Before calling,
462  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
463  * are two extent state structs in the tree:
464  * prealloc: [orig->start, split - 1]
465  * orig: [ split, orig->end ]
466  *
467  * The tree locks are not taken by this function. They need to be held
468  * by the caller.
469  */
470 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
471                        struct extent_state *prealloc, u64 split)
472 {
473         struct rb_node *node;
474
475         split_cb(tree, orig, split);
476
477         prealloc->start = orig->start;
478         prealloc->end = split - 1;
479         prealloc->state = orig->state;
480         orig->start = split;
481
482         node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node,
483                            NULL, NULL);
484         if (node) {
485                 free_extent_state(prealloc);
486                 return -EEXIST;
487         }
488         prealloc->tree = tree;
489         return 0;
490 }
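
/*
 * Illustrative sketch (hypothetical helper, for this annotated listing only):
 * splitting a state covering [0, 8191] at offset 4096 leaves
 * prealloc = [0, 4095] and orig = [4096, 8191] in the tree.  The caller must
 * hold tree->lock, exactly as split_state() requires.
 */
static inline int example_split_at(struct extent_io_tree *tree,
				   struct extent_state *orig, u64 split)
{
	struct extent_state *prealloc = alloc_extent_state(GFP_ATOMIC);

	if (!prealloc)
		return -ENOMEM;
	/* split_state() frees prealloc itself if the insert collides */
	return split_state(tree, orig, prealloc, split);
}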
491
492 static struct extent_state *next_state(struct extent_state *state)
493 {
494         struct rb_node *next = rb_next(&state->rb_node);
495         if (next)
496                 return rb_entry(next, struct extent_state, rb_node);
497         else
498                 return NULL;
499 }
500
501 /*
502  * utility function to clear some bits in an extent state struct.
503  * It will optionally wake up anyone waiting on this state (wake == 1).
504  *
505  * If no bits are set on the state struct after clearing things, the
506  * struct is freed and removed from the tree
507  */
508 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
509                                             struct extent_state *state,
510                                             unsigned long *bits, int wake)
511 {
512         struct extent_state *next;
513         unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS;
514
515         if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
516                 u64 range = state->end - state->start + 1;
517                 WARN_ON(range > tree->dirty_bytes);
518                 tree->dirty_bytes -= range;
519         }
520         clear_state_cb(tree, state, bits);
521         state->state &= ~bits_to_clear;
522         if (wake)
523                 wake_up(&state->wq);
524         if (state->state == 0) {
525                 next = next_state(state);
526                 if (state->tree) {
527                         rb_erase(&state->rb_node, &tree->state);
528                         state->tree = NULL;
529                         free_extent_state(state);
530                 } else {
531                         WARN_ON(1);
532                 }
533         } else {
534                 merge_state(tree, state);
535                 next = next_state(state);
536         }
537         return next;
538 }
539
540 static struct extent_state *
541 alloc_extent_state_atomic(struct extent_state *prealloc)
542 {
543         if (!prealloc)
544                 prealloc = alloc_extent_state(GFP_ATOMIC);
545
546         return prealloc;
547 }
548
549 static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
550 {
551         btrfs_panic(tree_fs_info(tree), err, "Locking error: "
552                     "Extent tree was modified by another "
553                     "thread while locked.");
554 }
555
556 /*
557  * clear some bits on a range in the tree.  This may require splitting
558  * or inserting elements in the tree, so the gfp mask is used to
559  * indicate which allocations or sleeping are allowed.
560  *
561  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
562  * the given range from the tree regardless of state (ie for truncate).
563  *
564  * the range [start, end] is inclusive.
565  *
566  * This takes the tree lock, and returns 0 on success and < 0 on error.
567  */
568 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
569                      unsigned long bits, int wake, int delete,
570                      struct extent_state **cached_state,
571                      gfp_t mask)
572 {
573         struct extent_state *state;
574         struct extent_state *cached;
575         struct extent_state *prealloc = NULL;
576         struct rb_node *node;
577         u64 last_end;
578         int err;
579         int clear = 0;
580
581         btrfs_debug_check_extent_io_range(tree, start, end);
582
583         if (bits & EXTENT_DELALLOC)
584                 bits |= EXTENT_NORESERVE;
585
586         if (delete)
587                 bits |= ~EXTENT_CTLBITS;
588         bits |= EXTENT_FIRST_DELALLOC;
589
590         if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
591                 clear = 1;
592 again:
593         if (!prealloc && (mask & __GFP_WAIT)) {
594                 prealloc = alloc_extent_state(mask);
595                 if (!prealloc)
596                         return -ENOMEM;
597         }
598
599         spin_lock(&tree->lock);
600         if (cached_state) {
601                 cached = *cached_state;
602
603                 if (clear) {
604                         *cached_state = NULL;
605                         cached_state = NULL;
606                 }
607
608                 if (cached && cached->tree && cached->start <= start &&
609                     cached->end > start) {
610                         if (clear)
611                                 atomic_dec(&cached->refs);
612                         state = cached;
613                         goto hit_next;
614                 }
615                 if (clear)
616                         free_extent_state(cached);
617         }
618         /*
619          * this search will find the extents that end after
620          * our range starts
621          */
622         node = tree_search(tree, start);
623         if (!node)
624                 goto out;
625         state = rb_entry(node, struct extent_state, rb_node);
626 hit_next:
627         if (state->start > end)
628                 goto out;
629         WARN_ON(state->end < start);
630         last_end = state->end;
631
632         /* the state doesn't have the wanted bits, go ahead */
633         if (!(state->state & bits)) {
634                 state = next_state(state);
635                 goto next;
636         }
637
638         /*
639          *     | ---- desired range ---- |
640          *  | state | or
641          *  | ------------- state -------------- |
642          *
643          * We need to split the extent we found, and may flip
644          * bits on second half.
645          *
646          * If the extent we found extends past our range, we
647          * just split and search again.  It'll get split again
648          * the next time though.
649          *
650          * If the extent we found is inside our range, we clear
651          * the desired bit on it.
652          */
653
654         if (state->start < start) {
655                 prealloc = alloc_extent_state_atomic(prealloc);
656                 BUG_ON(!prealloc);
657                 err = split_state(tree, state, prealloc, start);
658                 if (err)
659                         extent_io_tree_panic(tree, err);
660
661                 prealloc = NULL;
662                 if (err)
663                         goto out;
664                 if (state->end <= end) {
665                         state = clear_state_bit(tree, state, &bits, wake);
666                         goto next;
667                 }
668                 goto search_again;
669         }
670         /*
671          * | ---- desired range ---- |
672          *                        | state |
673          * We need to split the extent, and clear the bit
674          * on the first half
675          */
676         if (state->start <= end && state->end > end) {
677                 prealloc = alloc_extent_state_atomic(prealloc);
678                 BUG_ON(!prealloc);
679                 err = split_state(tree, state, prealloc, end + 1);
680                 if (err)
681                         extent_io_tree_panic(tree, err);
682
683                 if (wake)
684                         wake_up(&state->wq);
685
686                 clear_state_bit(tree, prealloc, &bits, wake);
687
688                 prealloc = NULL;
689                 goto out;
690         }
691
692         state = clear_state_bit(tree, state, &bits, wake);
693 next:
694         if (last_end == (u64)-1)
695                 goto out;
696         start = last_end + 1;
697         if (start <= end && state && !need_resched())
698                 goto hit_next;
699         goto search_again;
700
701 out:
702         spin_unlock(&tree->lock);
703         if (prealloc)
704                 free_extent_state(prealloc);
705
706         return 0;
707
708 search_again:
709         if (start > end)
710                 goto out;
711         spin_unlock(&tree->lock);
712         if (mask & __GFP_WAIT)
713                 cond_resched();
714         goto again;
715 }
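
/*
 * Illustrative sketch (hypothetical helper): a typical caller clears the
 * bookkeeping bits over an inclusive byte range and lets clear_extent_bit()
 * split any states that straddle the boundaries.  wake == 1 kicks waiters,
 * delete == 0 leaves unrelated bits alone.
 */
static inline int example_clear_dirty_range(struct extent_io_tree *tree,
					    u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 1, 0,
				NULL, GFP_NOFS);
}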
716
717 static void wait_on_state(struct extent_io_tree *tree,
718                           struct extent_state *state)
719                 __releases(tree->lock)
720                 __acquires(tree->lock)
721 {
722         DEFINE_WAIT(wait);
723         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
724         spin_unlock(&tree->lock);
725         schedule();
726         spin_lock(&tree->lock);
727         finish_wait(&state->wq, &wait);
728 }
729
730 /*
731  * waits for one or more bits to clear on a range in the state tree.
732  * The range [start, end] is inclusive.
733  * The tree lock is taken by this function
734  */
735 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
736                             unsigned long bits)
737 {
738         struct extent_state *state;
739         struct rb_node *node;
740
741         btrfs_debug_check_extent_io_range(tree, start, end);
742
743         spin_lock(&tree->lock);
744 again:
745         while (1) {
746                 /*
747                  * this search will find all the extents that end after
748                  * our range starts
749                  */
750                 node = tree_search(tree, start);
751                 if (!node)
752                         break;
753
754                 state = rb_entry(node, struct extent_state, rb_node);
755
756                 if (state->start > end)
757                         goto out;
758
759                 if (state->state & bits) {
760                         start = state->start;
761                         atomic_inc(&state->refs);
762                         wait_on_state(tree, state);
763                         free_extent_state(state);
764                         goto again;
765                 }
766                 start = state->end + 1;
767
768                 if (start > end)
769                         break;
770
771                 cond_resched_lock(&tree->lock);
772         }
773 out:
774         spin_unlock(&tree->lock);
775 }
776
777 static void set_state_bits(struct extent_io_tree *tree,
778                            struct extent_state *state,
779                            unsigned long *bits)
780 {
781         unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS;
782
783         set_state_cb(tree, state, bits);
784         if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
785                 u64 range = state->end - state->start + 1;
786                 tree->dirty_bytes += range;
787         }
788         state->state |= bits_to_set;
789 }
790
791 static void cache_state(struct extent_state *state,
792                         struct extent_state **cached_ptr)
793 {
794         if (cached_ptr && !(*cached_ptr)) {
795                 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
796                         *cached_ptr = state;
797                         atomic_inc(&state->refs);
798                 }
799         }
800 }
801
802 /*
803  * set some bits on a range in the tree.  This may require allocations or
804  * sleeping, so the gfp mask is used to indicate what is allowed.
805  *
806  * If any of the exclusive bits are set, this will fail with -EEXIST if some
807  * part of the range already has the desired bits set.  The start of the
808  * existing range is returned in failed_start in this case.
809  *
810  * [start, end] is inclusive.  This takes the tree lock.
811  */
812
813 static int __must_check
814 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
815                  unsigned long bits, unsigned long exclusive_bits,
816                  u64 *failed_start, struct extent_state **cached_state,
817                  gfp_t mask)
818 {
819         struct extent_state *state;
820         struct extent_state *prealloc = NULL;
821         struct rb_node *node;
822         struct rb_node **p;
823         struct rb_node *parent;
824         int err = 0;
825         u64 last_start;
826         u64 last_end;
827
828         btrfs_debug_check_extent_io_range(tree, start, end);
829
830         bits |= EXTENT_FIRST_DELALLOC;
831 again:
832         if (!prealloc && (mask & __GFP_WAIT)) {
833                 prealloc = alloc_extent_state(mask);
834                 BUG_ON(!prealloc);
835         }
836
837         spin_lock(&tree->lock);
838         if (cached_state && *cached_state) {
839                 state = *cached_state;
840                 if (state->start <= start && state->end > start &&
841                     state->tree) {
842                         node = &state->rb_node;
843                         goto hit_next;
844                 }
845         }
846         /*
847          * this search will find all the extents that end after
848          * our range starts.
849          */
850         node = tree_search_for_insert(tree, start, &p, &parent);
851         if (!node) {
852                 prealloc = alloc_extent_state_atomic(prealloc);
853                 BUG_ON(!prealloc);
854                 err = insert_state(tree, prealloc, start, end,
855                                    &p, &parent, &bits);
856                 if (err)
857                         extent_io_tree_panic(tree, err);
858
859                 cache_state(prealloc, cached_state);
860                 prealloc = NULL;
861                 goto out;
862         }
863         state = rb_entry(node, struct extent_state, rb_node);
864 hit_next:
865         last_start = state->start;
866         last_end = state->end;
867
868         /*
869          * | ---- desired range ---- |
870          * | state |
871          *
872          * Just lock what we found and keep going
873          */
874         if (state->start == start && state->end <= end) {
875                 if (state->state & exclusive_bits) {
876                         *failed_start = state->start;
877                         err = -EEXIST;
878                         goto out;
879                 }
880
881                 set_state_bits(tree, state, &bits);
882                 cache_state(state, cached_state);
883                 merge_state(tree, state);
884                 if (last_end == (u64)-1)
885                         goto out;
886                 start = last_end + 1;
887                 state = next_state(state);
888                 if (start < end && state && state->start == start &&
889                     !need_resched())
890                         goto hit_next;
891                 goto search_again;
892         }
893
894         /*
895          *     | ---- desired range ---- |
896          * | state |
897          *   or
898          * | ------------- state -------------- |
899          *
900          * We need to split the extent we found, and may flip bits on
901          * second half.
902          *
903          * If the extent we found extends past our
904          * range, we just split and search again.  It'll get split
905          * again the next time though.
906          *
907          * If the extent we found is inside our range, we set the
908          * desired bit on it.
909          */
910         if (state->start < start) {
911                 if (state->state & exclusive_bits) {
912                         *failed_start = start;
913                         err = -EEXIST;
914                         goto out;
915                 }
916
917                 prealloc = alloc_extent_state_atomic(prealloc);
918                 BUG_ON(!prealloc);
919                 err = split_state(tree, state, prealloc, start);
920                 if (err)
921                         extent_io_tree_panic(tree, err);
922
923                 prealloc = NULL;
924                 if (err)
925                         goto out;
926                 if (state->end <= end) {
927                         set_state_bits(tree, state, &bits);
928                         cache_state(state, cached_state);
929                         merge_state(tree, state);
930                         if (last_end == (u64)-1)
931                                 goto out;
932                         start = last_end + 1;
933                         state = next_state(state);
934                         if (start < end && state && state->start == start &&
935                             !need_resched())
936                                 goto hit_next;
937                 }
938                 goto search_again;
939         }
940         /*
941          * | ---- desired range ---- |
942          *     | state | or               | state |
943          *
944          * There's a hole, we need to insert something in it and
945          * ignore the extent we found.
946          */
947         if (state->start > start) {
948                 u64 this_end;
949                 if (end < last_start)
950                         this_end = end;
951                 else
952                         this_end = last_start - 1;
953
954                 prealloc = alloc_extent_state_atomic(prealloc);
955                 BUG_ON(!prealloc);
956
957                 /*
958                  * Avoid freeing 'prealloc' if it can be merged with
959                  * the later extent.
960                  */
961                 err = insert_state(tree, prealloc, start, this_end,
962                                    NULL, NULL, &bits);
963                 if (err)
964                         extent_io_tree_panic(tree, err);
965
966                 cache_state(prealloc, cached_state);
967                 prealloc = NULL;
968                 start = this_end + 1;
969                 goto search_again;
970         }
971         /*
972          * | ---- desired range ---- |
973          *                        | state |
974          * We need to split the extent, and set the bit
975          * on the first half
976          */
977         if (state->start <= end && state->end > end) {
978                 if (state->state & exclusive_bits) {
979                         *failed_start = start;
980                         err = -EEXIST;
981                         goto out;
982                 }
983
984                 prealloc = alloc_extent_state_atomic(prealloc);
985                 BUG_ON(!prealloc);
986                 err = split_state(tree, state, prealloc, end + 1);
987                 if (err)
988                         extent_io_tree_panic(tree, err);
989
990                 set_state_bits(tree, prealloc, &bits);
991                 cache_state(prealloc, cached_state);
992                 merge_state(tree, prealloc);
993                 prealloc = NULL;
994                 goto out;
995         }
996
997         goto search_again;
998
999 out:
1000         spin_unlock(&tree->lock);
1001         if (prealloc)
1002                 free_extent_state(prealloc);
1003
1004         return err;
1005
1006 search_again:
1007         if (start > end)
1008                 goto out;
1009         spin_unlock(&tree->lock);
1010         if (mask & __GFP_WAIT)
1011                 cond_resched();
1012         goto again;
1013 }
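
/*
 * Illustrative sketch (hypothetical helper): how a caller reacts to the
 * -EEXIST / failed_start contract described above when an exclusive bit is
 * requested.  lock_extent_bits() and try_lock_extent() below implement the
 * real retry and back-off policies.
 */
static inline int example_try_exclusive(struct extent_io_tree *tree,
					u64 start, u64 end)
{
	u64 failed_start;
	int err;

	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			       &failed_start, NULL, GFP_NOFS);
	if (err == -EEXIST)
		return -EAGAIN;	/* the state at failed_start already holds EXTENT_LOCKED */
	return err;
}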
1014
1015 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1016                    unsigned long bits, u64 * failed_start,
1017                    struct extent_state **cached_state, gfp_t mask)
1018 {
1019         return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1020                                 cached_state, mask);
1021 }
1022
1023
1024 /**
1025  * convert_extent_bit - convert all bits in a given range from one bit to
1026  *                      another
1027  * @tree:       the io tree to search
1028  * @start:      the start offset in bytes
1029  * @end:        the end offset in bytes (inclusive)
1030  * @bits:       the bits to set in this range
1031  * @clear_bits: the bits to clear in this range
1032  * @cached_state:       state that we're going to cache
1033  * @mask:       the allocation mask
1034  *
1035  * This will go through and set bits for the given range.  If any states exist
1036  * already in this range they are set with the given bit and cleared of the
1037  * clear_bits.  This is only meant to be used by things that are mergeable, ie
1038  * converting from say DELALLOC to DIRTY.  This is not meant to be used with
1039  * boundary bits like LOCK.
1040  */
1041 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1042                        unsigned long bits, unsigned long clear_bits,
1043                        struct extent_state **cached_state, gfp_t mask)
1044 {
1045         struct extent_state *state;
1046         struct extent_state *prealloc = NULL;
1047         struct rb_node *node;
1048         struct rb_node **p;
1049         struct rb_node *parent;
1050         int err = 0;
1051         u64 last_start;
1052         u64 last_end;
1053
1054         btrfs_debug_check_extent_io_range(tree, start, end);
1055
1056 again:
1057         if (!prealloc && (mask & __GFP_WAIT)) {
1058                 prealloc = alloc_extent_state(mask);
1059                 if (!prealloc)
1060                         return -ENOMEM;
1061         }
1062
1063         spin_lock(&tree->lock);
1064         if (cached_state && *cached_state) {
1065                 state = *cached_state;
1066                 if (state->start <= start && state->end > start &&
1067                     state->tree) {
1068                         node = &state->rb_node;
1069                         goto hit_next;
1070                 }
1071         }
1072
1073         /*
1074          * this search will find all the extents that end after
1075          * our range starts.
1076          */
1077         node = tree_search_for_insert(tree, start, &p, &parent);
1078         if (!node) {
1079                 prealloc = alloc_extent_state_atomic(prealloc);
1080                 if (!prealloc) {
1081                         err = -ENOMEM;
1082                         goto out;
1083                 }
1084                 err = insert_state(tree, prealloc, start, end,
1085                                    &p, &parent, &bits);
1086                 if (err)
1087                         extent_io_tree_panic(tree, err);
1088                 cache_state(prealloc, cached_state);
1089                 prealloc = NULL;
1090                 goto out;
1091         }
1092         state = rb_entry(node, struct extent_state, rb_node);
1093 hit_next:
1094         last_start = state->start;
1095         last_end = state->end;
1096
1097         /*
1098          * | ---- desired range ---- |
1099          * | state |
1100          *
1101          * Just lock what we found and keep going
1102          */
1103         if (state->start == start && state->end <= end) {
1104                 set_state_bits(tree, state, &bits);
1105                 cache_state(state, cached_state);
1106                 state = clear_state_bit(tree, state, &clear_bits, 0);
1107                 if (last_end == (u64)-1)
1108                         goto out;
1109                 start = last_end + 1;
1110                 if (start < end && state && state->start == start &&
1111                     !need_resched())
1112                         goto hit_next;
1113                 goto search_again;
1114         }
1115
1116         /*
1117          *     | ---- desired range ---- |
1118          * | state |
1119          *   or
1120          * | ------------- state -------------- |
1121          *
1122          * We need to split the extent we found, and may flip bits on
1123          * second half.
1124          *
1125          * If the extent we found extends past our
1126          * range, we just split and search again.  It'll get split
1127          * again the next time though.
1128          *
1129          * If the extent we found is inside our range, we set the
1130          * desired bit on it.
1131          */
1132         if (state->start < start) {
1133                 prealloc = alloc_extent_state_atomic(prealloc);
1134                 if (!prealloc) {
1135                         err = -ENOMEM;
1136                         goto out;
1137                 }
1138                 err = split_state(tree, state, prealloc, start);
1139                 if (err)
1140                         extent_io_tree_panic(tree, err);
1141                 prealloc = NULL;
1142                 if (err)
1143                         goto out;
1144                 if (state->end <= end) {
1145                         set_state_bits(tree, state, &bits);
1146                         cache_state(state, cached_state);
1147                         state = clear_state_bit(tree, state, &clear_bits, 0);
1148                         if (last_end == (u64)-1)
1149                                 goto out;
1150                         start = last_end + 1;
1151                         if (start < end && state && state->start == start &&
1152                             !need_resched())
1153                                 goto hit_next;
1154                 }
1155                 goto search_again;
1156         }
1157         /*
1158          * | ---- desired range ---- |
1159          *     | state | or               | state |
1160          *
1161          * There's a hole, we need to insert something in it and
1162          * ignore the extent we found.
1163          */
1164         if (state->start > start) {
1165                 u64 this_end;
1166                 if (end < last_start)
1167                         this_end = end;
1168                 else
1169                         this_end = last_start - 1;
1170
1171                 prealloc = alloc_extent_state_atomic(prealloc);
1172                 if (!prealloc) {
1173                         err = -ENOMEM;
1174                         goto out;
1175                 }
1176
1177                 /*
1178                  * Avoid freeing 'prealloc' if it can be merged with
1179                  * the later extent.
1180                  */
1181                 err = insert_state(tree, prealloc, start, this_end,
1182                                    NULL, NULL, &bits);
1183                 if (err)
1184                         extent_io_tree_panic(tree, err);
1185                 cache_state(prealloc, cached_state);
1186                 prealloc = NULL;
1187                 start = this_end + 1;
1188                 goto search_again;
1189         }
1190         /*
1191          * | ---- desired range ---- |
1192          *                        | state |
1193          * We need to split the extent, and set the bit
1194          * on the first half
1195          */
1196         if (state->start <= end && state->end > end) {
1197                 prealloc = alloc_extent_state_atomic(prealloc);
1198                 if (!prealloc) {
1199                         err = -ENOMEM;
1200                         goto out;
1201                 }
1202
1203                 err = split_state(tree, state, prealloc, end + 1);
1204                 if (err)
1205                         extent_io_tree_panic(tree, err);
1206
1207                 set_state_bits(tree, prealloc, &bits);
1208                 cache_state(prealloc, cached_state);
1209                 clear_state_bit(tree, prealloc, &clear_bits, 0);
1210                 prealloc = NULL;
1211                 goto out;
1212         }
1213
1214         goto search_again;
1215
1216 out:
1217         spin_unlock(&tree->lock);
1218         if (prealloc)
1219                 free_extent_state(prealloc);
1220
1221         return err;
1222
1223 search_again:
1224         if (start > end)
1225                 goto out;
1226         spin_unlock(&tree->lock);
1227         if (mask & __GFP_WAIT)
1228                 cond_resched();
1229         goto again;
1230 }
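
/*
 * Illustrative sketch (hypothetical helper): the conversion described in the
 * comment above, trading EXTENT_DELALLOC for EXTENT_DIRTY across an inclusive
 * range while reusing a cached state if the caller has one.
 */
static inline int example_delalloc_to_dirty(struct extent_io_tree *tree,
					    u64 start, u64 end,
					    struct extent_state **cached)
{
	return convert_extent_bit(tree, start, end, EXTENT_DIRTY,
				  EXTENT_DELALLOC, cached, GFP_NOFS);
}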
1231
1232 /* wrappers around set/clear extent bit */
1233 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1234                      gfp_t mask)
1235 {
1236         return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
1237                               NULL, mask);
1238 }
1239
1240 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1241                     unsigned long bits, gfp_t mask)
1242 {
1243         return set_extent_bit(tree, start, end, bits, NULL,
1244                               NULL, mask);
1245 }
1246
1247 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1248                       unsigned long bits, gfp_t mask)
1249 {
1250         return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
1251 }
1252
1253 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1254                         struct extent_state **cached_state, gfp_t mask)
1255 {
1256         return set_extent_bit(tree, start, end,
1257                               EXTENT_DELALLOC | EXTENT_UPTODATE,
1258                               NULL, cached_state, mask);
1259 }
1260
1261 int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
1262                       struct extent_state **cached_state, gfp_t mask)
1263 {
1264         return set_extent_bit(tree, start, end,
1265                               EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
1266                               NULL, cached_state, mask);
1267 }
1268
1269 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1270                        gfp_t mask)
1271 {
1272         return clear_extent_bit(tree, start, end,
1273                                 EXTENT_DIRTY | EXTENT_DELALLOC |
1274                                 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
1275 }
1276
1277 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1278                      gfp_t mask)
1279 {
1280         return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
1281                               NULL, mask);
1282 }
1283
1284 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1285                         struct extent_state **cached_state, gfp_t mask)
1286 {
1287         return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
1288                               cached_state, mask);
1289 }
1290
1291 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1292                           struct extent_state **cached_state, gfp_t mask)
1293 {
1294         return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1295                                 cached_state, mask);
1296 }
1297
1298 /*
1299  * either insert or lock the state struct between start and end.  Use mask
1300  * to tell us if waiting is desired.
1301  */
1302 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1303                      unsigned long bits, struct extent_state **cached_state)
1304 {
1305         int err;
1306         u64 failed_start;
1307         while (1) {
1308                 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1309                                        EXTENT_LOCKED, &failed_start,
1310                                        cached_state, GFP_NOFS);
1311                 if (err == -EEXIST) {
1312                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1313                         start = failed_start;
1314                 } else
1315                         break;
1316                 WARN_ON(start > end);
1317         }
1318         return err;
1319 }
1320
1321 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1322 {
1323         return lock_extent_bits(tree, start, end, 0, NULL);
1324 }
1325
1326 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1327 {
1328         int err;
1329         u64 failed_start;
1330
1331         err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1332                                &failed_start, NULL, GFP_NOFS);
1333         if (err == -EEXIST) {
1334                 if (failed_start > start)
1335                         clear_extent_bit(tree, start, failed_start - 1,
1336                                          EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1337                 return 0;
1338         }
1339         return 1;
1340 }
1341
1342 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1343                          struct extent_state **cached, gfp_t mask)
1344 {
1345         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1346                                 mask);
1347 }
1348
1349 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1350 {
1351         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1352                                 GFP_NOFS);
1353 }
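
/*
 * Illustrative sketch (hypothetical): the usual pairing of the helpers above.
 * Lock an inclusive byte range, operate on it, then unlock it; real callers
 * usually also keep a cached state and unlock via unlock_extent_cached().
 */
static inline void example_locked_section(struct extent_io_tree *tree,
					  u64 start, u64 end)
{
	lock_extent(tree, start, end);	/* sleeps until the range is ours */
	/* ... read or modify state for [start, end] here ... */
	unlock_extent(tree, start, end);
}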
1354
1355 int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1356 {
1357         unsigned long index = start >> PAGE_CACHE_SHIFT;
1358         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1359         struct page *page;
1360
1361         while (index <= end_index) {
1362                 page = find_get_page(inode->i_mapping, index);
1363                 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1364                 clear_page_dirty_for_io(page);
1365                 page_cache_release(page);
1366                 index++;
1367         }
1368         return 0;
1369 }
1370
1371 int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1372 {
1373         unsigned long index = start >> PAGE_CACHE_SHIFT;
1374         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1375         struct page *page;
1376
1377         while (index <= end_index) {
1378                 page = find_get_page(inode->i_mapping, index);
1379                 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1380                 account_page_redirty(page);
1381                 __set_page_dirty_nobuffers(page);
1382                 page_cache_release(page);
1383                 index++;
1384         }
1385         return 0;
1386 }
1387
1388 /*
1389  * helper function to set both pages and extents in the tree writeback
1390  */
1391 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1392 {
1393         unsigned long index = start >> PAGE_CACHE_SHIFT;
1394         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1395         struct page *page;
1396
1397         while (index <= end_index) {
1398                 page = find_get_page(tree->mapping, index);
1399                 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1400                 set_page_writeback(page);
1401                 page_cache_release(page);
1402                 index++;
1403         }
1404         return 0;
1405 }
1406
1407 /* find the first state struct with 'bits' set after 'start', and
1408  * return it.  tree->lock must be held.  NULL will be returned if
1409  * nothing was found after 'start'
1410  */
1411 static struct extent_state *
1412 find_first_extent_bit_state(struct extent_io_tree *tree,
1413                             u64 start, unsigned long bits)
1414 {
1415         struct rb_node *node;
1416         struct extent_state *state;
1417
1418         /*
1419          * this search will find all the extents that end after
1420          * our range starts.
1421          */
1422         node = tree_search(tree, start);
1423         if (!node)
1424                 goto out;
1425
1426         while (1) {
1427                 state = rb_entry(node, struct extent_state, rb_node);
1428                 if (state->end >= start && (state->state & bits))
1429                         return state;
1430
1431                 node = rb_next(node);
1432                 if (!node)
1433                         break;
1434         }
1435 out:
1436         return NULL;
1437 }
1438
1439 /*
1440  * find the first offset in the io tree with 'bits' set. zero is
1441  * returned if we find something, and *start_ret and *end_ret are
1442  * set to reflect the state struct that was found.
1443  *
1444  * If nothing was found, 1 is returned; if something was found, 0 is returned.
1445  */
1446 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1447                           u64 *start_ret, u64 *end_ret, unsigned long bits,
1448                           struct extent_state **cached_state)
1449 {
1450         struct extent_state *state;
1451         struct rb_node *n;
1452         int ret = 1;
1453
1454         spin_lock(&tree->lock);
1455         if (cached_state && *cached_state) {
1456                 state = *cached_state;
1457                 if (state->end == start - 1 && state->tree) {
1458                         n = rb_next(&state->rb_node);
1459                         while (n) {
1460                                 state = rb_entry(n, struct extent_state,
1461                                                  rb_node);
1462                                 if (state->state & bits)
1463                                         goto got_it;
1464                                 n = rb_next(n);
1465                         }
1466                         free_extent_state(*cached_state);
1467                         *cached_state = NULL;
1468                         goto out;
1469                 }
1470                 free_extent_state(*cached_state);
1471                 *cached_state = NULL;
1472         }
1473
1474         state = find_first_extent_bit_state(tree, start, bits);
1475 got_it:
1476         if (state) {
1477                 cache_state(state, cached_state);
1478                 *start_ret = state->start;
1479                 *end_ret = state->end;
1480                 ret = 0;
1481         }
1482 out:
1483         spin_unlock(&tree->lock);
1484         return ret;
1485 }
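
/*
 * Illustrative sketch (hypothetical helper): walking every range that has a
 * given bit set, using the 0-on-hit / 1-on-miss contract described above.
 */
static inline u64 example_count_dirty_bytes(struct extent_io_tree *tree)
{
	u64 start = 0, found_start, found_end, total = 0;

	while (!find_first_extent_bit(tree, start, &found_start, &found_end,
				      EXTENT_DIRTY, NULL)) {
		total += found_end - found_start + 1;
		if (found_end == (u64)-1)
			break;
		start = found_end + 1;
	}
	return total;
}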
1486
1487 /*
1488  * find a contiguous range of bytes in the file marked as delalloc, not
1489  * more than 'max_bytes'.  start and end are used to return the range.
1490  *
1491  * 1 is returned if we find something, 0 if nothing was in the tree.
1492  */
1493 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1494                                         u64 *start, u64 *end, u64 max_bytes,
1495                                         struct extent_state **cached_state)
1496 {
1497         struct rb_node *node;
1498         struct extent_state *state;
1499         u64 cur_start = *start;
1500         u64 found = 0;
1501         u64 total_bytes = 0;
1502
1503         spin_lock(&tree->lock);
1504
1505         /*
1506          * this search will find all the extents that end after
1507          * our range starts.
1508          */
1509         node = tree_search(tree, cur_start);
1510         if (!node) {
1511                 if (!found)
1512                         *end = (u64)-1;
1513                 goto out;
1514         }
1515
1516         while (1) {
1517                 state = rb_entry(node, struct extent_state, rb_node);
1518                 if (found && (state->start != cur_start ||
1519                               (state->state & EXTENT_BOUNDARY))) {
1520                         goto out;
1521                 }
1522                 if (!(state->state & EXTENT_DELALLOC)) {
1523                         if (!found)
1524                                 *end = state->end;
1525                         goto out;
1526                 }
1527                 if (!found) {
1528                         *start = state->start;
1529                         *cached_state = state;
1530                         atomic_inc(&state->refs);
1531                 }
1532                 found++;
1533                 *end = state->end;
1534                 cur_start = state->end + 1;
1535                 node = rb_next(node);
1536                 total_bytes += state->end - state->start + 1;
1537                 if (total_bytes >= max_bytes)
1538                         break;
1539                 if (!node)
1540                         break;
1541         }
1542 out:
1543         spin_unlock(&tree->lock);
1544         return found;
1545 }
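
/*
 * A minimal illustrative sketch (hypothetical helper, not part of btrfs):
 * the expected calling pattern for find_delalloc_range().  When something is
 * found, the function has taken a reference on the first extent_state via
 * the cached_state argument, and the caller is responsible for dropping it
 * with free_extent_state(), just as find_lock_delalloc_range() below does.
 */
static u64 example_peek_delalloc(struct extent_io_tree *tree, u64 offset,
                                 u64 max_bytes)
{
        u64 start = offset;
        u64 end = 0;
        u64 found;
        struct extent_state *cached = NULL;

        found = find_delalloc_range(tree, &start, &end, max_bytes, &cached);
        if (found) {
                /* [start, end] is delalloc; release the extra reference */
                free_extent_state(cached);
        }
        return found;
}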
1546
1547 static noinline void __unlock_for_delalloc(struct inode *inode,
1548                                            struct page *locked_page,
1549                                            u64 start, u64 end)
1550 {
1551         int ret;
1552         struct page *pages[16];
1553         unsigned long index = start >> PAGE_CACHE_SHIFT;
1554         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1555         unsigned long nr_pages = end_index - index + 1;
1556         int i;
1557
1558         if (index == locked_page->index && end_index == index)
1559                 return;
1560
1561         while (nr_pages > 0) {
1562                 ret = find_get_pages_contig(inode->i_mapping, index,
1563                                      min_t(unsigned long, nr_pages,
1564                                      ARRAY_SIZE(pages)), pages);
1565                 for (i = 0; i < ret; i++) {
1566                         if (pages[i] != locked_page)
1567                                 unlock_page(pages[i]);
1568                         page_cache_release(pages[i]);
1569                 }
1570                 nr_pages -= ret;
1571                 index += ret;
1572                 cond_resched();
1573         }
1574 }
1575
1576 static noinline int lock_delalloc_pages(struct inode *inode,
1577                                         struct page *locked_page,
1578                                         u64 delalloc_start,
1579                                         u64 delalloc_end)
1580 {
1581         unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1582         unsigned long start_index = index;
1583         unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1584         unsigned long pages_locked = 0;
1585         struct page *pages[16];
1586         unsigned long nrpages;
1587         int ret;
1588         int i;
1589
1590         /* the caller is responsible for locking the start index */
1591         if (index == locked_page->index && index == end_index)
1592                 return 0;
1593
1594         /* skip the page at the start index */
1595         nrpages = end_index - index + 1;
1596         while (nrpages > 0) {
1597                 ret = find_get_pages_contig(inode->i_mapping, index,
1598                                      min_t(unsigned long,
1599                                      nrpages, ARRAY_SIZE(pages)), pages);
1600                 if (ret == 0) {
1601                         ret = -EAGAIN;
1602                         goto done;
1603                 }
1604                 /* now we have an array of pages, lock them all */
1605                 for (i = 0; i < ret; i++) {
1606                         /*
1607                          * the caller is taking responsibility for
1608                          * locked_page
1609                          */
1610                         if (pages[i] != locked_page) {
1611                                 lock_page(pages[i]);
1612                                 if (!PageDirty(pages[i]) ||
1613                                     pages[i]->mapping != inode->i_mapping) {
1614                                         ret = -EAGAIN;
1615                                         unlock_page(pages[i]);
1616                                         page_cache_release(pages[i]);
1617                                         goto done;
1618                                 }
1619                         }
1620                         page_cache_release(pages[i]);
1621                         pages_locked++;
1622                 }
1623                 nrpages -= ret;
1624                 index += ret;
1625                 cond_resched();
1626         }
1627         ret = 0;
1628 done:
1629         if (ret && pages_locked) {
1630                 __unlock_for_delalloc(inode, locked_page,
1631                               delalloc_start,
1632                               ((u64)(start_index + pages_locked - 1)) <<
1633                               PAGE_CACHE_SHIFT);
1634         }
1635         return ret;
1636 }
1637
1638 /*
1639  * find a contiguous range of bytes in the file marked as delalloc, not
1640  * more than 'max_bytes'.  start and end are used to return the range,
1641  *
1642  * 1 is returned if we find something, 0 if nothing was in the tree
1643  */
1644 STATIC u64 find_lock_delalloc_range(struct inode *inode,
1645                                     struct extent_io_tree *tree,
1646                                     struct page *locked_page, u64 *start,
1647                                     u64 *end, u64 max_bytes)
1648 {
1649         u64 delalloc_start;
1650         u64 delalloc_end;
1651         u64 found;
1652         struct extent_state *cached_state = NULL;
1653         int ret;
1654         int loops = 0;
1655
1656 again:
1657         /* step one, find a bunch of delalloc bytes starting at start */
1658         delalloc_start = *start;
1659         delalloc_end = 0;
1660         found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1661                                     max_bytes, &cached_state);
1662         if (!found || delalloc_end <= *start) {
1663                 *start = delalloc_start;
1664                 *end = delalloc_end;
1665                 free_extent_state(cached_state);
1666                 return 0;
1667         }
1668
1669         /*
1670          * start comes from the offset of locked_page.  We have to lock
1671          * pages in order, so we can't process delalloc bytes before
1672          * locked_page
1673          */
1674         if (delalloc_start < *start)
1675                 delalloc_start = *start;
1676
1677         /*
1678          * make sure to limit the number of pages we try to lock down
1679          */
1680         if (delalloc_end + 1 - delalloc_start > max_bytes)
1681                 delalloc_end = delalloc_start + max_bytes - 1;
1682
1683         /* step two, lock all the pages after the page that has start */
1684         ret = lock_delalloc_pages(inode, locked_page,
1685                                   delalloc_start, delalloc_end);
1686         if (ret == -EAGAIN) {
1687                 /* some of the pages are gone, let's avoid looping by
1688                  * shortening the size of the delalloc range we're searching
1689                  */
1690                 free_extent_state(cached_state);
1691                 if (!loops) {
1692                         max_bytes = PAGE_CACHE_SIZE;
1693                         loops = 1;
1694                         goto again;
1695                 } else {
1696                         found = 0;
1697                         goto out_failed;
1698                 }
1699         }
1700         BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1701
1702         /* step three, lock the state bits for the whole range */
1703         lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
1704
1705         /* then test to make sure it is all still delalloc */
1706         ret = test_range_bit(tree, delalloc_start, delalloc_end,
1707                              EXTENT_DELALLOC, 1, cached_state);
1708         if (!ret) {
1709                 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1710                                      &cached_state, GFP_NOFS);
1711                 __unlock_for_delalloc(inode, locked_page,
1712                               delalloc_start, delalloc_end);
1713                 cond_resched();
1714                 goto again;
1715         }
1716         free_extent_state(cached_state);
1717         *start = delalloc_start;
1718         *end = delalloc_end;
1719 out_failed:
1720         return found;
1721 }
1722
1723 int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1724                                  struct page *locked_page,
1725                                  unsigned long clear_bits,
1726                                  unsigned long page_ops)
1727 {
1728         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1729         int ret;
1730         struct page *pages[16];
1731         unsigned long index = start >> PAGE_CACHE_SHIFT;
1732         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1733         unsigned long nr_pages = end_index - index + 1;
1734         int i;
1735
1736         clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1737         if (page_ops == 0)
1738                 return 0;
1739
1740         while (nr_pages > 0) {
1741                 ret = find_get_pages_contig(inode->i_mapping, index,
1742                                      min_t(unsigned long,
1743                                      nr_pages, ARRAY_SIZE(pages)), pages);
1744                 for (i = 0; i < ret; i++) {
1745
1746                         if (page_ops & PAGE_SET_PRIVATE2)
1747                                 SetPagePrivate2(pages[i]);
1748
1749                         if (pages[i] == locked_page) {
1750                                 page_cache_release(pages[i]);
1751                                 continue;
1752                         }
1753                         if (page_ops & PAGE_CLEAR_DIRTY)
1754                                 clear_page_dirty_for_io(pages[i]);
1755                         if (page_ops & PAGE_SET_WRITEBACK)
1756                                 set_page_writeback(pages[i]);
1757                         if (page_ops & PAGE_END_WRITEBACK)
1758                                 end_page_writeback(pages[i]);
1759                         if (page_ops & PAGE_UNLOCK)
1760                                 unlock_page(pages[i]);
1761                         page_cache_release(pages[i]);
1762                 }
1763                 nr_pages -= ret;
1764                 index += ret;
1765                 cond_resched();
1766         }
1767         return 0;
1768 }
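
/*
 * A minimal illustrative sketch (hypothetical helper, not part of btrfs):
 * how an error path might tear down a delalloc range that was set up and
 * locked earlier, clearing the extent bits and unlocking the pages in one
 * call.  The particular combination of bits and page operations is an
 * assumption for illustration; real callers in inode.c choose their own.
 */
static void example_fail_delalloc_range(struct inode *inode, u64 start,
                                        u64 end, struct page *locked_page)
{
        extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                     EXTENT_LOCKED | EXTENT_DELALLOC |
                                     EXTENT_DIRTY,
                                     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                     PAGE_END_WRITEBACK);
}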
1769
1770 /*
1771  * count the number of bytes in the tree that have a given bit(s)
1772  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1773  * cached.  The total number found is returned.
1774  */
1775 u64 count_range_bits(struct extent_io_tree *tree,
1776                      u64 *start, u64 search_end, u64 max_bytes,
1777                      unsigned long bits, int contig)
1778 {
1779         struct rb_node *node;
1780         struct extent_state *state;
1781         u64 cur_start = *start;
1782         u64 total_bytes = 0;
1783         u64 last = 0;
1784         int found = 0;
1785
1786         if (WARN_ON(search_end <= cur_start))
1787                 return 0;
1788
1789         spin_lock(&tree->lock);
1790         if (cur_start == 0 && bits == EXTENT_DIRTY) {
1791                 total_bytes = tree->dirty_bytes;
1792                 goto out;
1793         }
1794         /*
1795          * this search will find all the extents that end after
1796          * our range starts.
1797          */
1798         node = tree_search(tree, cur_start);
1799         if (!node)
1800                 goto out;
1801
1802         while (1) {
1803                 state = rb_entry(node, struct extent_state, rb_node);
1804                 if (state->start > search_end)
1805                         break;
1806                 if (contig && found && state->start > last + 1)
1807                         break;
1808                 if (state->end >= cur_start && (state->state & bits) == bits) {
1809                         total_bytes += min(search_end, state->end) + 1 -
1810                                        max(cur_start, state->start);
1811                         if (total_bytes >= max_bytes)
1812                                 break;
1813                         if (!found) {
1814                                 *start = max(cur_start, state->start);
1815                                 found = 1;
1816                         }
1817                         last = state->end;
1818                 } else if (contig && found) {
1819                         break;
1820                 }
1821                 node = rb_next(node);
1822                 if (!node)
1823                         break;
1824         }
1825 out:
1826         spin_unlock(&tree->lock);
1827         return total_bytes;
1828 }
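
/*
 * A minimal illustrative sketch (hypothetical helper, not part of btrfs):
 * summing the delalloc bytes in a file range with count_range_bits().
 * Passing contig == 0 adds up every matching extent until max_bytes is
 * reached; contig == 1 would stop at the first gap instead.
 */
static u64 example_count_delalloc(struct extent_io_tree *tree, u64 start,
                                  u64 end)
{
        u64 range_start = start;

        return count_range_bits(tree, &range_start, end, (u64)-1,
                                EXTENT_DELALLOC, 0);
}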
1829
1830 /*
1831  * set the private field for a given byte offset in the tree.  If there isn't
1832  * an extent_state there already, this does nothing.
1833  */
1834 static int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1835 {
1836         struct rb_node *node;
1837         struct extent_state *state;
1838         int ret = 0;
1839
1840         spin_lock(&tree->lock);
1841         /*
1842          * this search will find all the extents that end after
1843          * our range starts.
1844          */
1845         node = tree_search(tree, start);
1846         if (!node) {
1847                 ret = -ENOENT;
1848                 goto out;
1849         }
1850         state = rb_entry(node, struct extent_state, rb_node);
1851         if (state->start != start) {
1852                 ret = -ENOENT;
1853                 goto out;
1854         }
1855         state->private = private;
1856 out:
1857         spin_unlock(&tree->lock);
1858         return ret;
1859 }
1860
1861 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1862 {
1863         struct rb_node *node;
1864         struct extent_state *state;
1865         int ret = 0;
1866
1867         spin_lock(&tree->lock);
1868         /*
1869          * this search will find all the extents that end after
1870          * our range starts.
1871          */
1872         node = tree_search(tree, start);
1873         if (!node) {
1874                 ret = -ENOENT;
1875                 goto out;
1876         }
1877         state = rb_entry(node, struct extent_state, rb_node);
1878         if (state->start != start) {
1879                 ret = -ENOENT;
1880                 goto out;
1881         }
1882         *private = state->private;
1883 out:
1884         spin_unlock(&tree->lock);
1885         return ret;
1886 }
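
/*
 * A minimal illustrative sketch (hypothetical helper, not part of btrfs):
 * the store-a-pointer-as-u64 round trip that the io_failure_record code
 * below relies on.  'rec' is any object whose lifetime the caller manages;
 * set_state_private() only succeeds when an extent_state already starts at
 * exactly 'start'.
 */
static int example_stash_private(struct extent_io_tree *tree, u64 start,
                                 void *rec)
{
        u64 private;
        int ret;

        ret = set_state_private(tree, start, (u64)(unsigned long)rec);
        if (ret)
                return ret;

        ret = get_state_private(tree, start, &private);
        if (ret)
                return ret;

        WARN_ON((void *)(unsigned long)private != rec);
        return 0;
}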
1887
1888 /*
1889  * searches a range in the state tree for a given mask.
1890  * If 'filled' == 1, this returns 1 only if every extent covering the range
1891  * has the bits set.  Otherwise, 1 is returned if any bit in the
1892  * range is found set.
1893  */
1894 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1895                    unsigned long bits, int filled, struct extent_state *cached)
1896 {
1897         struct extent_state *state = NULL;
1898         struct rb_node *node;
1899         int bitset = 0;
1900
1901         spin_lock(&tree->lock);
1902         if (cached && cached->tree && cached->start <= start &&
1903             cached->end > start)
1904                 node = &cached->rb_node;
1905         else
1906                 node = tree_search(tree, start);
1907         while (node && start <= end) {
1908                 state = rb_entry(node, struct extent_state, rb_node);
1909
1910                 if (filled && state->start > start) {
1911                         bitset = 0;
1912                         break;
1913                 }
1914
1915                 if (state->start > end)
1916                         break;
1917
1918                 if (state->state & bits) {
1919                         bitset = 1;
1920                         if (!filled)
1921                                 break;
1922                 } else if (filled) {
1923                         bitset = 0;
1924                         break;
1925                 }
1926
1927                 if (state->end == (u64)-1)
1928                         break;
1929
1930                 start = state->end + 1;
1931                 if (start > end)
1932                         break;
1933                 node = rb_next(node);
1934                 if (!node) {
1935                         if (filled)
1936                                 bitset = 0;
1937                         break;
1938                 }
1939         }
1940         spin_unlock(&tree->lock);
1941         return bitset;
1942 }
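
/*
 * A minimal illustrative sketch (hypothetical helper, not part of btrfs):
 * the two 'filled' modes of test_range_bit().  With filled == 1 the whole
 * range must be covered by states carrying the bit; with filled == 0 a
 * single matching state anywhere in the range is enough.
 */
static void example_test_range_bit(struct extent_io_tree *tree, u64 start,
                                   u64 end)
{
        int all_uptodate = test_range_bit(tree, start, end,
                                          EXTENT_UPTODATE, 1, NULL);
        int any_delalloc = test_range_bit(tree, start, end,
                                          EXTENT_DELALLOC, 0, NULL);

        pr_debug("range [%llu,%llu]: all_uptodate=%d any_delalloc=%d\n",
                 start, end, all_uptodate, any_delalloc);
}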
1943
1944 /*
1945  * helper function to set a given page up to date if all the
1946  * extents in the tree for that page are up to date
1947  */
1948 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1949 {
1950         u64 start = page_offset(page);
1951         u64 end = start + PAGE_CACHE_SIZE - 1;
1952         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1953                 SetPageUptodate(page);
1954 }
1955
1956 /*
1957  * When IO fails, either with EIO or csum verification fails, we
1958  * try other mirrors that might have a good copy of the data.  This
1959  * io_failure_record is used to record state as we go through all the
1960  * mirrors.  If another mirror has good data, the page is set up to date
1961  * and things continue.  If a good mirror can't be found, the original
1962  * bio end_io callback is called to indicate things have failed.
1963  */
1964 struct io_failure_record {
1965         struct page *page;
1966         u64 start;
1967         u64 len;
1968         u64 logical;
1969         unsigned long bio_flags;
1970         int this_mirror;
1971         int failed_mirror;
1972         int in_validation;
1973 };
1974
1975 static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1976                                 int did_repair)
1977 {
1978         int ret;
1979         int err = 0;
1980         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1981
1982         set_state_private(failure_tree, rec->start, 0);
1983         ret = clear_extent_bits(failure_tree, rec->start,
1984                                 rec->start + rec->len - 1,
1985                                 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1986         if (ret)
1987                 err = ret;
1988
1989         ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1990                                 rec->start + rec->len - 1,
1991                                 EXTENT_DAMAGED, GFP_NOFS);
1992         if (ret && !err)
1993                 err = ret;
1994
1995         kfree(rec);
1996         return err;
1997 }
1998
1999 /*
2000  * this bypasses the standard btrfs submit functions deliberately, as
2001  * the standard behavior is to write all copies in a raid setup. here we only
2002  * want to write the one bad copy. so we do the mapping for ourselves and issue
2003  * submit_bio directly.
2004  * to avoid any synchronization issues, wait for the data after writing, which
2005  * actually prevents the read that triggered the error from finishing.
2006  * currently, there can be no more than two copies of every data bit. thus,
2007  * exactly one rewrite is required.
2008  */
2009 int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
2010                         u64 length, u64 logical, struct page *page,
2011                         int mirror_num)
2012 {
2013         struct bio *bio;
2014         struct btrfs_device *dev;
2015         u64 map_length = 0;
2016         u64 sector;
2017         struct btrfs_bio *bbio = NULL;
2018         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
2019         int ret;
2020
2021         ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
2022         BUG_ON(!mirror_num);
2023
2024         /* we can't repair anything in raid56 yet */
2025         if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
2026                 return 0;
2027
2028         bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2029         if (!bio)
2030                 return -EIO;
2031         bio->bi_size = 0;
2032         map_length = length;
2033
2034         ret = btrfs_map_block(fs_info, WRITE, logical,
2035                               &map_length, &bbio, mirror_num);
2036         if (ret) {
2037                 bio_put(bio);
2038                 return -EIO;
2039         }
2040         BUG_ON(mirror_num != bbio->mirror_num);
2041         sector = bbio->stripes[mirror_num-1].physical >> 9;
2042         bio->bi_sector = sector;
2043         dev = bbio->stripes[mirror_num-1].dev;
2044         kfree(bbio);
2045         if (!dev || !dev->bdev || !dev->writeable) {
2046                 bio_put(bio);
2047                 return -EIO;
2048         }
2049         bio->bi_bdev = dev->bdev;
2050         bio_add_page(bio, page, length, start - page_offset(page));
2051
2052         if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
2053                 /* try to remap that extent elsewhere? */
2054                 bio_put(bio);
2055                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2056                 return -EIO;
2057         }
2058
2059         printk_ratelimited_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
2060                       "(dev %s sector %llu)\n", page->mapping->host->i_ino,
2061                       start, rcu_str_deref(dev->name), sector);
2062
2063         bio_put(bio);
2064         return 0;
2065 }
2066
2067 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2068                          int mirror_num)
2069 {
2070         u64 start = eb->start;
2071         unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
2072         int ret = 0;
2073
2074         if (root->fs_info->sb->s_flags & MS_RDONLY)
2075                 return -EROFS;
2076
2077         for (i = 0; i < num_pages; i++) {
2078                 struct page *p = extent_buffer_page(eb, i);
2079                 ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE,
2080                                         start, p, mirror_num);
2081                 if (ret)
2082                         break;
2083                 start += PAGE_CACHE_SIZE;
2084         }
2085
2086         return ret;
2087 }
2088
2089 /*
2090  * each time an IO finishes, we do a fast check in the IO failure tree
2091  * to see if we need to process or clean up an io_failure_record
2092  */
2093 static int clean_io_failure(u64 start, struct page *page)
2094 {
2095         u64 private;
2096         u64 private_failure;
2097         struct io_failure_record *failrec;
2098         struct inode *inode = page->mapping->host;
2099         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2100         struct extent_state *state;
2101         int num_copies;
2102         int did_repair = 0;
2103         int ret;
2104
2105         private = 0;
2106         ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
2107                                 (u64)-1, 1, EXTENT_DIRTY, 0);
2108         if (!ret)
2109                 return 0;
2110
2111         ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
2112                                 &private_failure);
2113         if (ret)
2114                 return 0;
2115
2116         failrec = (struct io_failure_record *)(unsigned long) private_failure;
2117         BUG_ON(!failrec->this_mirror);
2118
2119         if (failrec->in_validation) {
2120                 /* there was no real error, just free the record */
2121                 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
2122                          failrec->start);
2123                 did_repair = 1;
2124                 goto out;
2125         }
2126         if (fs_info->sb->s_flags & MS_RDONLY)
2127                 goto out;
2128
2129         spin_lock(&BTRFS_I(inode)->io_tree.lock);
2130         state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
2131                                             failrec->start,
2132                                             EXTENT_LOCKED);
2133         spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2134
2135         if (state && state->start <= failrec->start &&
2136             state->end >= failrec->start + failrec->len - 1) {
2137                 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2138                                               failrec->len);
2139                 if (num_copies > 1)  {
2140                         ret = repair_io_failure(fs_info, start, failrec->len,
2141                                                 failrec->logical, page,
2142                                                 failrec->failed_mirror);
2143                         did_repair = !ret;
2144                 }
2145                 ret = 0;
2146         }
2147
2148 out:
2149         if (!ret)
2150                 ret = free_io_failure(inode, failrec, did_repair);
2151
2152         return ret;
2153 }
2154
2155 /*
2156  * this is a generic handler for readpage errors (default
2157  * readpage_io_failed_hook). if other copies exist, read those and write back
2158  * good data to the failed position. it does not try to remap the failed
2159  * extent elsewhere, hoping instead that the device is smart enough to do
2160  * this as needed
2161  */
2162
2163 static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2164                               struct page *page, u64 start, u64 end,
2165                               int failed_mirror)
2166 {
2167         struct io_failure_record *failrec = NULL;
2168         u64 private;
2169         struct extent_map *em;
2170         struct inode *inode = page->mapping->host;
2171         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2172         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2173         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2174         struct bio *bio;
2175         struct btrfs_io_bio *btrfs_failed_bio;
2176         struct btrfs_io_bio *btrfs_bio;
2177         int num_copies;
2178         int ret;
2179         int read_mode;
2180         u64 logical;
2181
2182         BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2183
2184         ret = get_state_private(failure_tree, start, &private);
2185         if (ret) {
2186                 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2187                 if (!failrec)
2188                         return -ENOMEM;
2189                 failrec->start = start;
2190                 failrec->len = end - start + 1;
2191                 failrec->this_mirror = 0;
2192                 failrec->bio_flags = 0;
2193                 failrec->in_validation = 0;
2194
2195                 read_lock(&em_tree->lock);
2196                 em = lookup_extent_mapping(em_tree, start, failrec->len);
2197                 if (!em) {
2198                         read_unlock(&em_tree->lock);
2199                         kfree(failrec);
2200                         return -EIO;
2201                 }
2202
2203                 if (em->start > start || em->start + em->len <= start) {
2204                         free_extent_map(em);
2205                         em = NULL;
2206                 }
2207                 read_unlock(&em_tree->lock);
2208
2209                 if (!em) {
2210                         kfree(failrec);
2211                         return -EIO;
2212                 }
2213                 logical = start - em->start;
2214                 logical = em->block_start + logical;
2215                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2216                         logical = em->block_start;
2217                         failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2218                         extent_set_compress_type(&failrec->bio_flags,
2219                                                  em->compress_type);
2220                 }
2221                 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2222                          "len=%llu\n", logical, start, failrec->len);
2223                 failrec->logical = logical;
2224                 free_extent_map(em);
2225
2226                 /* set the bits in the private failure tree */
2227                 ret = set_extent_bits(failure_tree, start, end,
2228                                         EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2229                 if (ret >= 0)
2230                         ret = set_state_private(failure_tree, start,
2231                                                 (u64)(unsigned long)failrec);
2232                 /* set the bits in the inode's tree */
2233                 if (ret >= 0)
2234                         ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2235                                                 GFP_NOFS);
2236                 if (ret < 0) {
2237                         kfree(failrec);
2238                         return ret;
2239                 }
2240         } else {
2241                 failrec = (struct io_failure_record *)(unsigned long)private;
2242                 pr_debug("bio_readpage_error: (found) logical=%llu, "
2243                          "start=%llu, len=%llu, validation=%d\n",
2244                          failrec->logical, failrec->start, failrec->len,
2245                          failrec->in_validation);
2246                 /*
2247                  * when data can be on disk more than twice, add to failrec here
2248                  * (e.g. with a list for failed_mirror) to make
2249                  * clean_io_failure() clean all those errors at once.
2250                  */
2251         }
2252         num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
2253                                       failrec->logical, failrec->len);
2254         if (num_copies == 1) {
2255                 /*
2256                  * we only have a single copy of the data, so don't bother with
2257                  * all the retry and error correction code that follows. no
2258                  * matter what the error is, it is very likely to persist.
2259                  */
2260                 pr_debug("bio_readpage_error: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
2261                          num_copies, failrec->this_mirror, failed_mirror);
2262                 free_io_failure(inode, failrec, 0);
2263                 return -EIO;
2264         }
2265
2266         /*
2267          * there are two premises:
2268          *      a) deliver good data to the caller
2269          *      b) correct the bad sectors on disk
2270          */
2271         if (failed_bio->bi_vcnt > 1) {
2272                 /*
2273                  * to fulfill b), we need to know the exact failing sectors, as
2274                  * we don't want to rewrite any more than the failed ones. thus,
2275                  * we need separate read requests for the failed bio
2276                  *
2277                  * if the following BUG_ON triggers, our validation request got
2278                  * merged. we need separate requests for our algorithm to work.
2279                  */
2280                 BUG_ON(failrec->in_validation);
2281                 failrec->in_validation = 1;
2282                 failrec->this_mirror = failed_mirror;
2283                 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2284         } else {
2285                 /*
2286                  * we're ready to fulfill a) and b) at the same time. get a good
2287                  * copy of the failed sector and if we succeed, we have set up
2288                  * everything for repair_io_failure to do the rest for us.
2289                  */
2290                 if (failrec->in_validation) {
2291                         BUG_ON(failrec->this_mirror != failed_mirror);
2292                         failrec->in_validation = 0;
2293                         failrec->this_mirror = 0;
2294                 }
2295                 failrec->failed_mirror = failed_mirror;
2296                 failrec->this_mirror++;
2297                 if (failrec->this_mirror == failed_mirror)
2298                         failrec->this_mirror++;
2299                 read_mode = READ_SYNC;
2300         }
2301
2302         if (failrec->this_mirror > num_copies) {
2303                 pr_debug("bio_readpage_error: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
2304                          num_copies, failrec->this_mirror, failed_mirror);
2305                 free_io_failure(inode, failrec, 0);
2306                 return -EIO;
2307         }
2308
2309         bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2310         if (!bio) {
2311                 free_io_failure(inode, failrec, 0);
2312                 return -EIO;
2313         }
2314         bio->bi_end_io = failed_bio->bi_end_io;
2315         bio->bi_sector = failrec->logical >> 9;
2316         bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2317         bio->bi_size = 0;
2318
2319         btrfs_failed_bio = btrfs_io_bio(failed_bio);
2320         if (btrfs_failed_bio->csum) {
2321                 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2322                 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2323
2324                 btrfs_bio = btrfs_io_bio(bio);
2325                 btrfs_bio->csum = btrfs_bio->csum_inline;
2326                 phy_offset >>= inode->i_sb->s_blocksize_bits;
2327                 phy_offset *= csum_size;
2328                 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + phy_offset,
2329                        csum_size);
2330         }
2331
2332         bio_add_page(bio, page, failrec->len, start - page_offset(page));
2333
2334         pr_debug("bio_readpage_error: submitting new read[%#x] to "
2335                  "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2336                  failrec->this_mirror, num_copies, failrec->in_validation);
2337
2338         ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2339                                          failrec->this_mirror,
2340                                          failrec->bio_flags, 0);
2341         return ret;
2342 }
2343
2344 /* lots and lots of room for performance fixes in the end_bio funcs */
2345
2346 int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2347 {
2348         int uptodate = (err == 0);
2349         struct extent_io_tree *tree;
2350         int ret;
2351
2352         tree = &BTRFS_I(page->mapping->host)->io_tree;
2353
2354         if (tree->ops && tree->ops->writepage_end_io_hook) {
2355                 ret = tree->ops->writepage_end_io_hook(page, start,
2356                                                end, NULL, uptodate);
2357                 if (ret)
2358                         uptodate = 0;
2359         }
2360
2361         if (!uptodate) {
2362                 ClearPageUptodate(page);
2363                 SetPageError(page);
2364         }
2365         return 0;
2366 }
2367
2368 /*
2369  * after a writepage IO is done, we need to:
2370  * clear the uptodate bits on error
2371  * clear the writeback bits in the extent tree for this IO
2372  * end_page_writeback if the page has no more pending IO
2373  *
2374  * Scheduling is not allowed, so the extent state tree is expected
2375  * to have one and only one object corresponding to this IO.
2376  */
2377 static void end_bio_extent_writepage(struct bio *bio, int err)
2378 {
2379         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2380         u64 start;
2381         u64 end;
2382
2383         do {
2384                 struct page *page = bvec->bv_page;
2385
2386                 /* We always issue full-page writes, but if some block
2387                  * in a page fails to write, blk_update_request() will
2388                  * advance bv_offset and adjust bv_len to compensate.
2389                  * Print a warning for nonzero offsets, and an error
2390                  * if they don't add up to a full page.  */
2391                 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
2392                         printk("%s page write in btrfs with offset %u and length %u\n",
2393                                bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
2394                                ? KERN_ERR "partial" : KERN_INFO "incomplete",
2395                                bvec->bv_offset, bvec->bv_len);
2396
2397                 start = page_offset(page);
2398                 end = start + bvec->bv_offset + bvec->bv_len - 1;
2399
2400                 if (--bvec >= bio->bi_io_vec)
2401                         prefetchw(&bvec->bv_page->flags);
2402
2403                 if (end_extent_writepage(page, err, start, end))
2404                         continue;
2405
2406                 end_page_writeback(page);
2407         } while (bvec >= bio->bi_io_vec);
2408
2409         bio_put(bio);
2410 }
2411
2412 static void
2413 endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2414                               int uptodate)
2415 {
2416         struct extent_state *cached = NULL;
2417         u64 end = start + len - 1;
2418
2419         if (uptodate && tree->track_uptodate)
2420                 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2421         unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2422 }
2423
2424 /*
2425  * after a readpage IO is done, we need to:
2426  * clear the uptodate bits on error
2427  * set the uptodate bits if things worked
2428  * set the page up to date if all extents in the tree are uptodate
2429  * clear the lock bit in the extent tree
2430  * unlock the page if there are no other extents locked for it
2431  *
2432  * Scheduling is not allowed, so the extent state tree is expected
2433  * to have one and only one object corresponding to this IO.
2434  */
2435 static void end_bio_extent_readpage(struct bio *bio, int err)
2436 {
2437         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2438         struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2439         struct bio_vec *bvec = bio->bi_io_vec;
2440         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2441         struct extent_io_tree *tree;
2442         u64 offset = 0;
2443         u64 start;
2444         u64 end;
2445         u64 len;
2446         u64 extent_start = 0;
2447         u64 extent_len = 0;
2448         int mirror;
2449         int ret;
2450
2451         if (err)
2452                 uptodate = 0;
2453
2454         do {
2455                 struct page *page = bvec->bv_page;
2456                 struct inode *inode = page->mapping->host;
2457
2458                 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
2459                          "mirror=%lu\n", (u64)bio->bi_sector, err,
2460                          io_bio->mirror_num);
2461                 tree = &BTRFS_I(inode)->io_tree;
2462
2463                 /* We always issue full-page reads, but if some block
2464                  * in a page fails to read, blk_update_request() will
2465                  * advance bv_offset and adjust bv_len to compensate.
2466                  * Print a warning for nonzero offsets, and an error
2467                  * if they don't add up to a full page.  */
2468                 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
2469                         printk("%s page read in btrfs with offset %u and length %u\n",
2470                                bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
2471                                ? KERN_ERR "partial" : KERN_INFO "incomplete",
2472                                bvec->bv_offset, bvec->bv_len);
2473
2474                 start = page_offset(page);
2475                 end = start + bvec->bv_offset + bvec->bv_len - 1;
2476                 len = bvec->bv_len;
2477
2478                 if (++bvec <= bvec_end)
2479                         prefetchw(&bvec->bv_page->flags);
2480
2481                 mirror = io_bio->mirror_num;
2482                 if (likely(uptodate && tree->ops &&
2483                            tree->ops->readpage_end_io_hook)) {
2484                         ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2485                                                               page, start, end,
2486                                                               mirror);
2487                         if (ret)
2488                                 uptodate = 0;
2489                         else
2490                                 clean_io_failure(start, page);
2491                 }
2492
2493                 if (likely(uptodate))
2494                         goto readpage_ok;
2495
2496                 if (tree->ops && tree->ops->readpage_io_failed_hook) {
2497                         ret = tree->ops->readpage_io_failed_hook(page, mirror);
2498                         if (!ret && !err &&
2499                             test_bit(BIO_UPTODATE, &bio->bi_flags))
2500                                 uptodate = 1;
2501                 } else {
2502                         /*
2503                          * The generic bio_readpage_error handles errors the
2504                          * following way: If possible, new read requests are
2505                          * created and submitted and will end up in
2506                          * end_bio_extent_readpage as well (if we're lucky, not
2507                          * in the !uptodate case). In that case it returns 0 and
2508                          * we just go on with the next page in our bio. If it
2509                          * can't handle the error it will return -EIO and we
2510                          * remain responsible for that page.
2511                          */
2512                         ret = bio_readpage_error(bio, offset, page, start, end,
2513                                                  mirror);
2514                         if (ret == 0) {
2515                                 uptodate =
2516                                         test_bit(BIO_UPTODATE, &bio->bi_flags);
2517                                 if (err)
2518                                         uptodate = 0;
2519                                 continue;
2520                         }
2521                 }
2522 readpage_ok:
2523                 if (likely(uptodate)) {
2524                         loff_t i_size = i_size_read(inode);
2525                         pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2526                         unsigned offset;
2527
2528                         /* Zero out the end if this page straddles i_size */
2529                         offset = i_size & (PAGE_CACHE_SIZE-1);
2530                         if (page->index == end_index && offset)
2531                                 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2532                         SetPageUptodate(page);
2533                 } else {
2534                         ClearPageUptodate(page);
2535                         SetPageError(page);
2536                 }
2537                 unlock_page(page);
2538                 offset += len;
2539
2540                 if (unlikely(!uptodate)) {
2541                         if (extent_len) {
2542                                 endio_readpage_release_extent(tree,
2543                                                               extent_start,
2544                                                               extent_len, 1);
2545                                 extent_start = 0;
2546                                 extent_len = 0;
2547                         }
2548                         endio_readpage_release_extent(tree, start,
2549                                                       end - start + 1, 0);
2550                 } else if (!extent_len) {
2551                         extent_start = start;
2552                         extent_len = end + 1 - start;
2553                 } else if (extent_start + extent_len == start) {
2554                         extent_len += end + 1 - start;
2555                 } else {
2556                         endio_readpage_release_extent(tree, extent_start,
2557                                                       extent_len, uptodate);
2558                         extent_start = start;
2559                         extent_len = end + 1 - start;
2560                 }
2561         } while (bvec <= bvec_end);
2562
2563         if (extent_len)
2564                 endio_readpage_release_extent(tree, extent_start, extent_len,
2565                                               uptodate);
2566         if (io_bio->end_io)
2567                 io_bio->end_io(io_bio, err);
2568         bio_put(bio);
2569 }
2570
2571 /*
2572  * this allocates from the btrfs_bioset.  We're returning a bio right now
2573  * but you can call btrfs_io_bio for the appropriate container_of magic
2574  */
2575 struct bio *
2576 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2577                 gfp_t gfp_flags)
2578 {
2579         struct btrfs_io_bio *btrfs_bio;
2580         struct bio *bio;
2581
2582         bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
2583
2584         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2585                 while (!bio && (nr_vecs /= 2)) {
2586                         bio = bio_alloc_bioset(gfp_flags,
2587                                                nr_vecs, btrfs_bioset);
2588                 }
2589         }
2590
2591         if (bio) {
2592                 bio->bi_size = 0;
2593                 bio->bi_bdev = bdev;
2594                 bio->bi_sector = first_sector;
2595                 btrfs_bio = btrfs_io_bio(bio);
2596                 btrfs_bio->csum = NULL;
2597                 btrfs_bio->csum_allocated = NULL;
2598                 btrfs_bio->end_io = NULL;
2599         }
2600         return bio;
2601 }
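
/*
 * A minimal illustrative sketch (hypothetical helper, not part of btrfs):
 * because the bio returned above comes from btrfs_bioset, btrfs_io_bio()
 * gives back the enclosing struct btrfs_io_bio, which is the container_of
 * magic mentioned in the comment.  The bdev and sector are placeholders.
 */
static struct btrfs_io_bio *example_alloc_io_bio(struct block_device *bdev,
                                                 u64 first_sector)
{
        struct bio *bio;

        bio = btrfs_bio_alloc(bdev, first_sector, 1, GFP_NOFS);
        if (!bio)
                return NULL;

        /* container_of back to the btrfs-private wrapper around the bio */
        return btrfs_io_bio(bio);
}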
2602
2603 struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
2604 {
2605         return bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
2606 }
2607
2608
2609 /* this also allocates from the btrfs_bioset */
2610 struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
2611 {
2612         struct btrfs_io_bio *btrfs_bio;
2613         struct bio *bio;
2614
2615         bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
2616         if (bio) {
2617                 btrfs_bio = btrfs_io_bio(bio);
2618                 btrfs_bio->csum = NULL;
2619                 btrfs_bio->csum_allocated = NULL;
2620                 btrfs_bio->end_io = NULL;
2621         }
2622         return bio;
2623 }
2624
2625
2626 static int __must_check submit_one_bio(int rw, struct bio *bio,
2627                                        int mirror_num, unsigned long bio_flags)
2628 {
2629         int ret = 0;
2630         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2631         struct page *page = bvec->bv_page;
2632         struct extent_io_tree *tree = bio->bi_private;
2633         u64 start;
2634
2635         start = page_offset(page) + bvec->bv_offset;
2636
2637         bio->bi_private = NULL;
2638
2639         bio_get(bio);
2640
2641         if (tree->ops && tree->ops->submit_bio_hook)
2642                 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2643                                            mirror_num, bio_flags, start);
2644         else
2645                 btrfsic_submit_bio(rw, bio);
2646
2647         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2648                 ret = -EOPNOTSUPP;
2649         bio_put(bio);
2650         return ret;
2651 }
2652
2653 static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
2654                      unsigned long offset, size_t size, struct bio *bio,
2655                      unsigned long bio_flags)
2656 {
2657         int ret = 0;
2658         if (tree->ops && tree->ops->merge_bio_hook)
2659                 ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
2660                                                 bio_flags);
2661         BUG_ON(ret < 0);
2662         return ret;
2663
2664 }
2665
2666 static int submit_extent_page(int rw, struct extent_io_tree *tree,
2667                               struct page *page, sector_t sector,
2668                               size_t size, unsigned long offset,
2669                               struct block_device *bdev,
2670                               struct bio **bio_ret,
2671                               unsigned long max_pages,
2672                               bio_end_io_t end_io_func,
2673                               int mirror_num,
2674                               unsigned long prev_bio_flags,
2675                               unsigned long bio_flags)
2676 {
2677         int ret = 0;
2678         struct bio *bio;
2679         int nr;
2680         int contig = 0;
2681         int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2682         int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2683         size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2684
2685         if (bio_ret && *bio_ret) {
2686                 bio = *bio_ret;
2687                 if (old_compressed)
2688                         contig = bio->bi_sector == sector;
2689                 else
2690                         contig = bio_end_sector(bio) == sector;
2691
2692                 if (prev_bio_flags != bio_flags || !contig ||
2693                     merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
2694                     bio_add_page(bio, page, page_size, offset) < page_size) {
2695                         ret = submit_one_bio(rw, bio, mirror_num,
2696                                              prev_bio_flags);
2697                         if (ret < 0)
2698                                 return ret;
2699                         bio = NULL;
2700                 } else {
2701                         return 0;
2702                 }
2703         }
2704         if (this_compressed)
2705                 nr = BIO_MAX_PAGES;
2706         else
2707                 nr = bio_get_nr_vecs(bdev);
2708
2709         bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2710         if (!bio)
2711                 return -ENOMEM;
2712
2713         bio_add_page(bio, page, page_size, offset);
2714         bio->bi_end_io = end_io_func;
2715         bio->bi_private = tree;
2716
2717         if (bio_ret)
2718                 *bio_ret = bio;
2719         else
2720                 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2721
2722         return ret;
2723 }
2724
2725 static void attach_extent_buffer_page(struct extent_buffer *eb,
2726                                       struct page *page)
2727 {
2728         if (!PagePrivate(page)) {
2729                 SetPagePrivate(page);
2730                 page_cache_get(page);
2731                 set_page_private(page, (unsigned long)eb);
2732         } else {
2733                 WARN_ON(page->private != (unsigned long)eb);
2734         }
2735 }
2736
2737 void set_page_extent_mapped(struct page *page)
2738 {
2739         if (!PagePrivate(page)) {
2740                 SetPagePrivate(page);
2741                 page_cache_get(page);
2742                 set_page_private(page, EXTENT_PAGE_PRIVATE);
2743         }
2744 }
2745
2746 static struct extent_map *
2747 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2748                  u64 start, u64 len, get_extent_t *get_extent,
2749                  struct extent_map **em_cached)
2750 {
2751         struct extent_map *em;
2752
2753         if (em_cached && *em_cached) {
2754                 em = *em_cached;
2755                 if (em->in_tree && start >= em->start &&
2756                     start < extent_map_end(em)) {
2757                         atomic_inc(&em->refs);
2758                         return em;
2759                 }
2760
2761                 free_extent_map(em);
2762                 *em_cached = NULL;
2763         }
2764
2765         em = get_extent(inode, page, pg_offset, start, len, 0);
2766         if (em_cached && !IS_ERR_OR_NULL(em)) {
2767                 BUG_ON(*em_cached);
2768                 atomic_inc(&em->refs);
2769                 *em_cached = em;
2770         }
2771         return em;
2772 }
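
/*
 * A minimal illustrative sketch (hypothetical helper, not part of btrfs):
 * how a caller reading several adjacent pages can reuse one extent map via
 * the em_cached argument, so get_extent() is not called once per page when
 * the pages share a mapping.  The loop shape is an assumption; __do_readpage()
 * below threads the same em_cached pointer through its readpage work.
 */
static void example_reuse_extent_map(struct inode *inode, struct page **pages,
                                     int nr_pages, get_extent_t *get_extent)
{
        struct extent_map *em_cached = NULL;
        int i;

        for (i = 0; i < nr_pages; i++) {
                u64 start = page_offset(pages[i]);
                struct extent_map *em;

                em = __get_extent_map(inode, pages[i], 0, start,
                                      PAGE_CACHE_SIZE, get_extent, &em_cached);
                if (!IS_ERR_OR_NULL(em))
                        free_extent_map(em);
        }
        if (em_cached)
                free_extent_map(em_cached);
}
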
2773 /*
2774  * basic readpage implementation.  Locked extent state structs are inserted
2775  * into the tree and are removed when the IO is done (by the end_io
2776  * handlers)
2777  * XXX JDM: This needs looking at to ensure proper page locking
2778  */
2779 static int __do_readpage(struct extent_io_tree *tree,
2780                          struct page *page,
2781                          get_extent_t *get_extent,
2782                          struct extent_map **em_cached,
2783                          struct bio **bio, int mirror_num,
2784                          unsigned long *bio_flags, int rw)
2785 {
2786         struct inode *inode = page->mapping->host;
2787         u64 start = page_offset(page);
2788         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2789         u64 end;
2790         u64 cur = start;
2791         u64 extent_offset;
2792         u64 last_byte = i_size_read(inode);
2793         u64 block_start;
2794         u64 cur_end;
2795         sector_t sector;
2796         struct extent_map *em;
2797         struct block_device *bdev;
2798         int ret;
2799         int nr = 0;
2800         int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
2801         size_t pg_offset = 0;
2802         size_t iosize;
2803         size_t disk_io_size;
2804         size_t blocksize = inode->i_sb->s_blocksize;
2805         unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
2806
2807         set_page_extent_mapped(page);
2808
2809         end = page_end;
2810         if (!PageUptodate(page)) {
2811                 if (cleancache_get_page(page) == 0) {
2812                         BUG_ON(blocksize != PAGE_SIZE);
2813                         unlock_extent(tree, start, end);
2814                         goto out;
2815                 }
2816         }
2817
2818         if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2819                 char *userpage;
2820                 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2821
2822                 if (zero_offset) {
2823                         iosize = PAGE_CACHE_SIZE - zero_offset;
2824                         userpage = kmap_atomic(page);
2825                         memset(userpage + zero_offset, 0, iosize);
2826                         flush_dcache_page(page);
2827                         kunmap_atomic(userpage);
2828                 }
2829         }
2830         while (cur <= end) {
2831                 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2832
2833                 if (cur >= last_byte) {
2834                         char *userpage;
2835                         struct extent_state *cached = NULL;
2836
2837                         iosize = PAGE_CACHE_SIZE - pg_offset;
2838                         userpage = kmap_atomic(page);
2839                         memset(userpage + pg_offset, 0, iosize);
2840                         flush_dcache_page(page);
2841                         kunmap_atomic(userpage);
2842                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2843                                             &cached, GFP_NOFS);
2844                         if (!parent_locked)
2845                                 unlock_extent_cached(tree, cur,
2846                                                      cur + iosize - 1,
2847                                                      &cached, GFP_NOFS);
2848                         break;
2849                 }
2850                 em = __get_extent_map(inode, page, pg_offset, cur,
2851                                       end - cur + 1, get_extent, em_cached);
2852                 if (IS_ERR_OR_NULL(em)) {
2853                         SetPageError(page);
2854                         if (!parent_locked)
2855                                 unlock_extent(tree, cur, end);
2856                         break;
2857                 }
2858                 extent_offset = cur - em->start;
2859                 BUG_ON(extent_map_end(em) <= cur);
2860                 BUG_ON(end < cur);
2861
2862                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2863                         this_bio_flag |= EXTENT_BIO_COMPRESSED;
2864                         extent_set_compress_type(&this_bio_flag,
2865                                                  em->compress_type);
2866                 }
2867
2868                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2869                 cur_end = min(extent_map_end(em) - 1, end);
2870                 iosize = ALIGN(iosize, blocksize);
2871                 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2872                         disk_io_size = em->block_len;
2873                         sector = em->block_start >> 9;
2874                 } else {
2875                         sector = (em->block_start + extent_offset) >> 9;
2876                         disk_io_size = iosize;
2877                 }
2878                 bdev = em->bdev;
2879                 block_start = em->block_start;
2880                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2881                         block_start = EXTENT_MAP_HOLE;
2882                 free_extent_map(em);
2883                 em = NULL;
2884
2885                 /* we've found a hole, just zero and go on */
2886                 if (block_start == EXTENT_MAP_HOLE) {
2887                         char *userpage;
2888                         struct extent_state *cached = NULL;
2889
2890                         userpage = kmap_atomic(page);
2891                         memset(userpage + pg_offset, 0, iosize);
2892                         flush_dcache_page(page);
2893                         kunmap_atomic(userpage);
2894
2895                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2896                                             &cached, GFP_NOFS);
2897                         unlock_extent_cached(tree, cur, cur + iosize - 1,
2898                                              &cached, GFP_NOFS);
2899                         cur = cur + iosize;
2900                         pg_offset += iosize;
2901                         continue;
2902                 }
2903                 /* the get_extent function already copied the data into the page */
2904                 if (test_range_bit(tree, cur, cur_end,
2905                                    EXTENT_UPTODATE, 1, NULL)) {
2906                         check_page_uptodate(tree, page);
2907                         if (!parent_locked)
2908                                 unlock_extent(tree, cur, cur + iosize - 1);
2909                         cur = cur + iosize;
2910                         pg_offset += iosize;
2911                         continue;
2912                 }
2913                 /* we have an inline extent but it didn't get
2914                  * marked uptodate.  Error out.
2915                  */
2916                 if (block_start == EXTENT_MAP_INLINE) {
2917                         SetPageError(page);
2918                         if (!parent_locked)
2919                                 unlock_extent(tree, cur, cur + iosize - 1);
2920                         cur = cur + iosize;
2921                         pg_offset += iosize;
2922                         continue;
2923                 }
2924
2925                 pnr -= page->index;
2926                 ret = submit_extent_page(rw, tree, page,
2927                                          sector, disk_io_size, pg_offset,
2928                                          bdev, bio, pnr,
2929                                          end_bio_extent_readpage, mirror_num,
2930                                          *bio_flags,
2931                                          this_bio_flag);
2932                 if (!ret) {
2933                         nr++;
2934                         *bio_flags = this_bio_flag;
2935                 } else {
2936                         SetPageError(page);
2937                         if (!parent_locked)
2938                                 unlock_extent(tree, cur, cur + iosize - 1);
2939                 }
2940                 cur = cur + iosize;
2941                 pg_offset += iosize;
2942         }
2943 out:
2944         if (!nr) {
2945                 if (!PageError(page))
2946                         SetPageUptodate(page);
2947                 unlock_page(page);
2948         }
2949         return 0;
2950 }
2951
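/*
 * Read a run of pages that are contiguous in the file.  The extent range
 * covering the run is locked and any ordered extents in it are waited on
 * before each page is handed to __do_readpage(); the per-page references
 * are dropped here as well.
 */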
2952 static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
2953                                              struct page *pages[], int nr_pages,
2954                                              u64 start, u64 end,
2955                                              get_extent_t *get_extent,
2956                                              struct extent_map **em_cached,
2957                                              struct bio **bio, int mirror_num,
2958                                              unsigned long *bio_flags, int rw)
2959 {
2960         struct inode *inode;
2961         struct btrfs_ordered_extent *ordered;
2962         int index;
2963
2964         inode = pages[0]->mapping->host;
2965         while (1) {
2966                 lock_extent(tree, start, end);
2967                 ordered = btrfs_lookup_ordered_range(inode, start,
2968                                                      end - start + 1);
2969                 if (!ordered)
2970                         break;
2971                 unlock_extent(tree, start, end);
2972                 btrfs_start_ordered_extent(inode, ordered, 1);
2973                 btrfs_put_ordered_extent(ordered);
2974         }
2975
2976         for (index = 0; index < nr_pages; index++) {
2977                 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
2978                               mirror_num, bio_flags, rw);
2979                 page_cache_release(pages[index]);
2980         }
2981 }
2982
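/*
 * Split the page array into runs that are contiguous in the file and hand
 * each run to __do_contiguous_readpages(), so the extent range only has to
 * be locked once per run instead of once per page.
 */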
2983 static void __extent_readpages(struct extent_io_tree *tree,
2984                                struct page *pages[],
2985                                int nr_pages, get_extent_t *get_extent,
2986                                struct extent_map **em_cached,
2987                                struct bio **bio, int mirror_num,
2988                                unsigned long *bio_flags, int rw)
2989 {
2990         u64 start = 0;
2991         u64 end = 0;
2992         u64 page_start;
2993         int index;
2994         int first_index = 0;
2995
2996         for (index = 0; index < nr_pages; index++) {
2997                 page_start = page_offset(pages[index]);
2998                 if (!end) {
2999                         start = page_start;
3000                         end = start + PAGE_CACHE_SIZE - 1;
3001                         first_index = index;
3002                 } else if (end + 1 == page_start) {
3003                         end += PAGE_CACHE_SIZE;
3004                 } else {
3005                         __do_contiguous_readpages(tree, &pages[first_index],
3006                                                   index - first_index, start,
3007                                                   end, get_extent, em_cached,
3008                                                   bio, mirror_num, bio_flags,
3009                                                   rw);
3010                         start = page_start;
3011                         end = start + PAGE_CACHE_SIZE - 1;
3012                         first_index = index;
3013                 }
3014         }
3015
3016         if (end)
3017                 __do_contiguous_readpages(tree, &pages[first_index],
3018                                           index - first_index, start,
3019                                           end, get_extent, em_cached, bio,
3020                                           mirror_num, bio_flags, rw);
3021 }
3022
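/*
 * Lock the extent range covering this page, wait for any ordered extent at
 * the page's offset to finish, and then read the page with __do_readpage().
 */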
3023 static int __extent_read_full_page(struct extent_io_tree *tree,
3024                                    struct page *page,
3025                                    get_extent_t *get_extent,
3026                                    struct bio **bio, int mirror_num,
3027                                    unsigned long *bio_flags, int rw)
3028 {
3029         struct inode *inode = page->mapping->host;
3030         struct btrfs_ordered_extent *ordered;
3031         u64 start = page_offset(page);
3032         u64 end = start + PAGE_CACHE_SIZE - 1;
3033         int ret;
3034
3035         while (1) {
3036                 lock_extent(tree, start, end);
3037                 ordered = btrfs_lookup_ordered_extent(inode, start);
3038                 if (!ordered)
3039                         break;
3040                 unlock_extent(tree, start, end);
3041                 btrfs_start_ordered_extent(inode, ordered, 1);
3042                 btrfs_put_ordered_extent(ordered);
3043         }
3044
3045         ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3046                             bio_flags, rw);
3047         return ret;
3048 }
3049
3050 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
3051                             get_extent_t *get_extent, int mirror_num)
3052 {
3053         struct bio *bio = NULL;
3054         unsigned long bio_flags = 0;
3055         int ret;
3056
3057         ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
3058                                       &bio_flags, READ);
3059         if (bio)
3060                 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3061         return ret;
3062 }
3063
3064 int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
3065                                  get_extent_t *get_extent, int mirror_num)
3066 {
3067         struct bio *bio = NULL;
3068         unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED;
3069         int ret;
3070
3071         ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
3072                                       &bio_flags, READ);
3073         if (bio)
3074                 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3075         return ret;
3076 }
3077
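/*
 * Account pages submitted for writeback against wbc->nr_to_write and, for
 * range_cyclic or whole-file writeback that still has budget left, advance
 * the mapping's writeback_index past this page.
 */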
3078 static noinline void update_nr_written(struct page *page,
3079                                       struct writeback_control *wbc,
3080                                       unsigned long nr_written)
3081 {
3082         wbc->nr_to_write -= nr_written;
3083         if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
3084             wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
3085                 page->mapping->writeback_index = page->index + nr_written;
3086 }
3087
3088 /*
3089  * The writepage semantics are similar to regular writepage.  Extent
3090  * records are inserted to lock ranges in the tree, and as dirty areas
3091  * are found, they are marked writeback.  Then the lock bits are removed
3092  * and the end_io handler clears the writeback ranges.
3093  */
3094 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3095                               void *data)
3096 {
3097         struct inode *inode = page->mapping->host;
3098         struct extent_page_data *epd = data;
3099         struct extent_io_tree *tree = epd->tree;
3100         u64 start = page_offset(page);
3101         u64 delalloc_start;
3102         u64 page_end = start + PAGE_CACHE_SIZE - 1;
3103         u64 end;
3104         u64 cur = start;
3105         u64 extent_offset;
3106         u64 last_byte = i_size_read(inode);
3107         u64 block_start;
3108         u64 iosize;
3109         sector_t sector;
3110         struct extent_state *cached_state = NULL;
3111         struct extent_map *em;
3112         struct block_device *bdev;
3113         int ret;
3114         int nr = 0;
3115         size_t pg_offset = 0;
3116         size_t blocksize;
3117         loff_t i_size = i_size_read(inode);
3118         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
3119         u64 nr_delalloc;
3120         u64 delalloc_end;
3121         int page_started;
3122         int compressed;
3123         int write_flags;
3124         unsigned long nr_written = 0;
3125         bool fill_delalloc = true;
3126
3127         if (wbc->sync_mode == WB_SYNC_ALL)
3128                 write_flags = WRITE_SYNC;
3129         else
3130                 write_flags = WRITE;
3131
3132         trace___extent_writepage(page, inode, wbc);
3133
3134         WARN_ON(!PageLocked(page));
3135
3136         ClearPageError(page);
3137
3138         pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
3139         if (page->index > end_index ||
3140            (page->index == end_index && !pg_offset)) {
3141                 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
3142                 unlock_page(page);
3143                 return 0;
3144         }
3145
3146         if (page->index == end_index) {
3147                 char *userpage;
3148
3149                 userpage = kmap_atomic(page);
3150                 memset(userpage + pg_offset, 0,
3151                        PAGE_CACHE_SIZE - pg_offset);
3152                 kunmap_atomic(userpage);
3153                 flush_dcache_page(page);
3154         }
3155         pg_offset = 0;
3156
3157         set_page_extent_mapped(page);
3158
3159         if (!tree->ops || !tree->ops->fill_delalloc)
3160                 fill_delalloc = false;
3161
3162         delalloc_start = start;
3163         delalloc_end = 0;
3164         page_started = 0;
3165         if (!epd->extent_locked && fill_delalloc) {
3166                 u64 delalloc_to_write = 0;
3167                 /*
3168                  * make sure the mapping's writeback_index is at least
3169                  * updated to this page.
3170                  */
3171                 update_nr_written(page, wbc, 0);
3172
3173                 while (delalloc_end < page_end) {
3174                         nr_delalloc = find_lock_delalloc_range(inode, tree,
3175                                                        page,
3176                                                        &delalloc_start,
3177                                                        &delalloc_end,
3178                                                        128 * 1024 * 1024);
3179                         if (nr_delalloc == 0) {
3180                                 delalloc_start = delalloc_end + 1;
3181                                 continue;
3182                         }
3183                         ret = tree->ops->fill_delalloc(inode, page,
3184                                                        delalloc_start,
3185                                                        delalloc_end,
3186                                                        &page_started,
3187                                                        &nr_written);
3188                         /* File system has been set read-only */
3189                         if (ret) {
3190                                 SetPageError(page);
3191                                 goto done;
3192                         }
3193                         /*
3194                          * delalloc_end is already one less than the total
3195                          * length, so we don't subtract one from
3196                          * PAGE_CACHE_SIZE
3197                          */
3198                         delalloc_to_write += (delalloc_end - delalloc_start +
3199                                               PAGE_CACHE_SIZE) >>
3200                                               PAGE_CACHE_SHIFT;
3201                         delalloc_start = delalloc_end + 1;
3202                 }
3203                 if (wbc->nr_to_write < delalloc_to_write) {
3204                         int thresh = 8192;
3205
3206                         if (delalloc_to_write < thresh * 2)
3207                                 thresh = delalloc_to_write;
3208                         wbc->nr_to_write = min_t(u64, delalloc_to_write,
3209                                                  thresh);
3210                 }
3211
3212                 /* did the fill delalloc function already unlock and start
3213                  * the IO?
3214                  */
3215                 if (page_started) {
3216                         ret = 0;
3217                         /*
3218                          * we've unlocked the page, so we can't update
3219                          * the mapping's writeback index, just update
3220                          * nr_to_write.
3221                          */
3222                         wbc->nr_to_write -= nr_written;
3223                         goto done_unlocked;
3224                 }
3225         }
3226         if (tree->ops && tree->ops->writepage_start_hook) {
3227                 ret = tree->ops->writepage_start_hook(page, start,
3228                                                       page_end);
3229                 if (ret) {
3230                         /* Fixup worker will requeue */
3231                         if (ret == -EBUSY)
3232                                 wbc->pages_skipped++;
3233                         else
3234                                 redirty_page_for_writepage(wbc, page);
3235                         update_nr_written(page, wbc, nr_written);
3236                         unlock_page(page);
3237                         ret = 0;
3238                         goto done_unlocked;
3239                 }
3240         }
3241
3242         /*
3243          * we don't want to touch the inode after unlocking the page,
3244          * so we update the mapping writeback index now
3245          */
3246         update_nr_written(page, wbc, nr_written + 1);
3247
3248         end = page_end;
3249         if (last_byte <= start) {
3250                 if (tree->ops && tree->ops->writepage_end_io_hook)
3251                         tree->ops->writepage_end_io_hook(page, start,
3252                                                          page_end, NULL, 1);
3253                 goto done;
3254         }
3255
3256         blocksize = inode->i_sb->s_blocksize;
3257
3258         while (cur <= end) {
3259                 if (cur >= last_byte) {
3260                         if (tree->ops && tree->ops->writepage_end_io_hook)
3261                                 tree->ops->writepage_end_io_hook(page, cur,
3262                                                          page_end, NULL, 1);
3263                         break;
3264                 }
3265                 em = epd->get_extent(inode, page, pg_offset, cur,
3266                                      end - cur + 1, 1);
3267                 if (IS_ERR_OR_NULL(em)) {
3268                         SetPageError(page);
3269                         break;
3270                 }
3271
3272                 extent_offset = cur - em->start;
3273                 BUG_ON(extent_map_end(em) <= cur);
3274                 BUG_ON(end < cur);
3275                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
3276                 iosize = ALIGN(iosize, blocksize);
3277                 sector = (em->block_start + extent_offset) >> 9;
3278                 bdev = em->bdev;
3279                 block_start = em->block_start;
3280                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3281                 free_extent_map(em);
3282                 em = NULL;
3283
3284                 /*
3285                  * compressed and inline extents are written through other
3286                  * paths in the FS
3287                  */
3288                 if (compressed || block_start == EXTENT_MAP_HOLE ||
3289                     block_start == EXTENT_MAP_INLINE) {
3290                         /*
3291                          * end_io notification does not happen here for
3292                          * compressed extents
3293                          */
3294                         if (!compressed && tree->ops &&
3295                             tree->ops->writepage_end_io_hook)
3296                                 tree->ops->writepage_end_io_hook(page, cur,
3297                                                          cur + iosize - 1,
3298                                                          NULL, 1);
3299                         else if (compressed) {
3300                                 /* we don't want to end_page_writeback on
3301                                  * a compressed extent.  this happens
3302                                  * elsewhere
3303                                  */
3304                                 nr++;
3305                         }
3306
3307                         cur += iosize;
3308                         pg_offset += iosize;
3309                         continue;
3310                 }
3311                 /* leave this out until we have a page_mkwrite call */
3312                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
3313                                    EXTENT_DIRTY, 0, NULL)) {
3314                         cur = cur + iosize;
3315                         pg_offset += iosize;
3316                         continue;
3317                 }
3318
3319                 if (tree->ops && tree->ops->writepage_io_hook) {
3320                         ret = tree->ops->writepage_io_hook(page, cur,
3321                                                 cur + iosize - 1);
3322                 } else {
3323                         ret = 0;
3324                 }
3325                 if (ret) {
3326                         SetPageError(page);
3327                 } else {
3328                         unsigned long max_nr = end_index + 1;
3329
3330                         set_range_writeback(tree, cur, cur + iosize - 1);
3331                         if (!PageWriteback(page)) {
3332                                 printk(KERN_ERR "btrfs warning page %lu not "
3333                                        "writeback, cur %llu end %llu\n",
3334                                        page->index, cur, end);
3335                         }
3336
3337                         ret = submit_extent_page(write_flags, tree, page,
3338                                                  sector, iosize, pg_offset,
3339                                                  bdev, &epd->bio, max_nr,
3340                                                  end_bio_extent_writepage,
3341                                                  0, 0, 0);
3342                         if (ret)
3343                                 SetPageError(page);
3344                 }
3345                 cur = cur + iosize;
3346                 pg_offset += iosize;
3347                 nr++;
3348         }
3349 done:
3350         if (nr == 0) {
3351                 /* make sure the mapping tag for page dirty gets cleared */
3352                 set_page_writeback(page);
3353                 end_page_writeback(page);
3354         }
3355         unlock_page(page);
3356
3357 done_unlocked:
3358
3359         /* drop our reference on any cached states */
3360         free_extent_state(cached_state);
3361         return 0;
3362 }
3363
3364 static int eb_wait(void *word)
3365 {
3366         io_schedule();
3367         return 0;
3368 }
3369
3370 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3371 {
3372         wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3373                     TASK_UNINTERRUPTIBLE);
3374 }
3375
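/*
 * Prepare an extent buffer for writeback: take the tree write lock,
 * flushing our pending bio first if we would otherwise block.  A buffer
 * already under writeback is skipped for non-syncing writeback, otherwise
 * we wait for the IO to finish.  A dirty buffer has its dirty bit cleared,
 * the writeback bit set and all of its pages locked.
 *
 * Returns 1 if the buffer should be written out, 0 if there is nothing
 * to do.
 */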
3376 static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3377                                      struct btrfs_fs_info *fs_info,
3378                                      struct extent_page_data *epd)
3379 {
3380         unsigned long i, num_pages;
3381         int flush = 0;
3382         int ret = 0;
3383
3384         if (!btrfs_try_tree_write_lock(eb)) {
3385                 flush = 1;
3386                 flush_write_bio(epd);
3387                 btrfs_tree_lock(eb);
3388         }
3389
3390         if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3391                 btrfs_tree_unlock(eb);
3392                 if (!epd->sync_io)
3393                         return 0;
3394                 if (!flush) {
3395                         flush_write_bio(epd);
3396                         flush = 1;
3397                 }
3398                 while (1) {
3399                         wait_on_extent_buffer_writeback(eb);
3400                         btrfs_tree_lock(eb);
3401                         if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3402                                 break;
3403                         btrfs_tree_unlock(eb);
3404                 }
3405         }
3406
3407         /*
3408          * We need to do this to prevent races with anyone checking whether the
3409          * eb is under IO, since we can end up with no IO bits set for a short
3410          * period of time.
3411          */
3412         spin_lock(&eb->refs_lock);
3413         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3414                 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3415                 spin_unlock(&eb->refs_lock);
3416                 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3417                 __percpu_counter_add(&fs_info->dirty_metadata_bytes,
3418                                      -eb->len,
3419                                      fs_info->dirty_metadata_batch);
3420                 ret = 1;
3421         } else {
3422                 spin_unlock(&eb->refs_lock);
3423         }
3424
3425         btrfs_tree_unlock(eb);
3426
3427         if (!ret)
3428                 return ret;
3429
3430         num_pages = num_extent_pages(eb->start, eb->len);
3431         for (i = 0; i < num_pages; i++) {
3432                 struct page *p = extent_buffer_page(eb, i);
3433
3434                 if (!trylock_page(p)) {
3435                         if (!flush) {
3436                                 flush_write_bio(epd);
3437                                 flush = 1;
3438                         }
3439                         lock_page(p);
3440                 }
3441         }
3442
3443         return ret;
3444 }
3445
3446 static void end_extent_buffer_writeback(struct extent_buffer *eb)
3447 {
3448         clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3449         smp_mb__after_clear_bit();
3450         wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3451 }
3452
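/*
 * bio completion for btree writeback.  Walk the bio's pages, end page
 * writeback on each one and, once the last page of an extent buffer has
 * finished, clear the buffer's writeback bit and wake up any waiters.
 * An IO error marks both the buffer and its pages with an error.
 */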
3453 static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3454 {
3455         int uptodate = err == 0;
3456         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3457         struct extent_buffer *eb;
3458         int done;
3459
3460         do {
3461                 struct page *page = bvec->bv_page;
3462
3463                 bvec--;
3464                 eb = (struct extent_buffer *)page->private;
3465                 BUG_ON(!eb);
3466                 done = atomic_dec_and_test(&eb->io_pages);
3467
3468                 if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3469                         set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3470                         ClearPageUptodate(page);
3471                         SetPageError(page);
3472                 }
3473
3474                 end_page_writeback(page);
3475
3476                 if (!done)
3477                         continue;
3478
3479                 end_extent_buffer_writeback(eb);
3480         } while (bvec >= bio->bi_io_vec);
3481
3482         bio_put(bio);
3483
3484 }
3485
3486 static int write_one_eb(struct extent_buffer *eb,
3487                         struct btrfs_fs_info *fs_info,
3488                         struct writeback_control *wbc,
3489                         struct extent_page_data *epd)
3490 {
3491         struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3492         u64 offset = eb->start;
3493         unsigned long i, num_pages;
3494         unsigned long bio_flags = 0;
3495         int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
3496         int ret = 0;
3497
3498         clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3499         num_pages = num_extent_pages(eb->start, eb->len);
3500         atomic_set(&eb->io_pages, num_pages);
3501         if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3502                 bio_flags = EXTENT_BIO_TREE_LOG;
3503
3504         for (i = 0; i < num_pages; i++) {
3505                 struct page *p = extent_buffer_page(eb, i);
3506
3507                 clear_page_dirty_for_io(p);
3508                 set_page_writeback(p);
3509                 ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3510                                          PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3511                                          -1, end_bio_extent_buffer_writepage,
3512                                          0, epd->bio_flags, bio_flags);
3513                 epd->bio_flags = bio_flags;
3514                 if (ret) {
3515                         set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3516                         SetPageError(p);
3517                         if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3518                                 end_extent_buffer_writeback(eb);
3519                         ret = -EIO;
3520                         break;
3521                 }
3522                 offset += PAGE_CACHE_SIZE;
3523                 update_nr_written(p, wbc, 1);
3524                 unlock_page(p);
3525         }
3526
3527         if (unlikely(ret)) {
3528                 for (; i < num_pages; i++) {
3529                         struct page *p = extent_buffer_page(eb, i);
3530                         unlock_page(p);
3531                 }
3532         }
3533
3534         return ret;
3535 }
3536
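/*
 * The btree equivalent of write_cache_pages(): walk the dirty metadata
 * pages, map each one back to its extent buffer and write whole buffers
 * out with write_one_eb().  Buffers that turn out to be clean or already
 * under writeback are skipped.
 */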
3537 int btree_write_cache_pages(struct address_space *mapping,
3538                                    struct writeback_control *wbc)
3539 {
3540         struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3541         struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3542         struct extent_buffer *eb, *prev_eb = NULL;
3543         struct extent_page_data epd = {
3544                 .bio = NULL,
3545                 .tree = tree,
3546                 .extent_locked = 0,
3547                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3548                 .bio_flags = 0,
3549         };
3550         int ret = 0;
3551         int done = 0;
3552         int nr_to_write_done = 0;
3553         struct pagevec pvec;
3554         int nr_pages;
3555         pgoff_t index;
3556         pgoff_t end;            /* Inclusive */
3557         int scanned = 0;
3558         int tag;
3559
3560         pagevec_init(&pvec, 0);
3561         if (wbc->range_cyclic) {
3562                 index = mapping->writeback_index; /* Start from prev offset */
3563                 end = -1;
3564         } else {
3565                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3566                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3567                 scanned = 1;
3568         }
3569         if (wbc->sync_mode == WB_SYNC_ALL)
3570                 tag = PAGECACHE_TAG_TOWRITE;
3571         else
3572                 tag = PAGECACHE_TAG_DIRTY;
3573 retry:
3574         if (wbc->sync_mode == WB_SYNC_ALL)
3575                 tag_pages_for_writeback(mapping, index, end);
3576         while (!done && !nr_to_write_done && (index <= end) &&
3577                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3578                         min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3579                 unsigned i;
3580
3581                 scanned = 1;
3582                 for (i = 0; i < nr_pages; i++) {
3583                         struct page *page = pvec.pages[i];
3584
3585                         if (!PagePrivate(page))
3586                                 continue;
3587
3588                         if (!wbc->range_cyclic && page->index > end) {
3589                                 done = 1;
3590                                 break;
3591                         }
3592
3593                         spin_lock(&mapping->private_lock);
3594                         if (!PagePrivate(page)) {
3595                                 spin_unlock(&mapping->private_lock);
3596                                 continue;
3597                         }
3598
3599                         eb = (struct extent_buffer *)page->private;
3600
3601                         /*
3602                          * Shouldn't happen and normally this would be a BUG_ON,
3603                          * but there is no sense in crashing the user's box for
3604                          * something we can survive anyway.
3605                          */
3606                         if (WARN_ON(!eb)) {
3607                                 spin_unlock(&mapping->private_lock);
3608                                 continue;
3609                         }
3610
3611                         if (eb == prev_eb) {
3612                                 spin_unlock(&mapping->private_lock);
3613                                 continue;
3614                         }
3615
3616                         ret = atomic_inc_not_zero(&eb->refs);
3617                         spin_unlock(&mapping->private_lock);
3618                         if (!ret)
3619                                 continue;
3620
3621                         prev_eb = eb;
3622                         ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3623                         if (!ret) {
3624                                 free_extent_buffer(eb);
3625                                 continue;
3626                         }
3627
3628                         ret = write_one_eb(eb, fs_info, wbc, &epd);
3629                         if (ret) {
3630                                 done = 1;
3631                                 free_extent_buffer(eb);
3632                                 break;
3633                         }
3634                         free_extent_buffer(eb);
3635
3636                         /*
3637                          * the filesystem may choose to bump up nr_to_write.
3638                          * We have to make sure to honor the new nr_to_write
3639                          * at any time
3640                          */
3641                         nr_to_write_done = wbc->nr_to_write <= 0;
3642                 }
3643                 pagevec_release(&pvec);
3644                 cond_resched();
3645         }
3646         if (!scanned && !done) {
3647                 /*
3648                  * We hit the last page and there is more work to be done: wrap
3649                  * back to the start of the file
3650                  */
3651                 scanned = 1;
3652                 index = 0;
3653                 goto retry;
3654         }
3655         flush_write_bio(&epd);
3656         return ret;
3657 }
3658
3659 /**
3660  * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3661  * @mapping: address space structure to write
3662  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3663  * @writepage: function called for each page
3664  * @data: data passed to writepage function
3665  *
3666  * If a page is already under I/O, extent_write_cache_pages() skips it, even
3667  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
3668  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
3669  * and msync() need to guarantee that all the data which was dirty at the time
3670  * the call was made gets new I/O started against it.  If wbc->sync_mode is
3671  * WB_SYNC_ALL then we were called for data integrity and we must wait for
3672  * existing IO to complete.
3673  */
3674 static int extent_write_cache_pages(struct extent_io_tree *tree,
3675                              struct address_space *mapping,
3676                              struct writeback_control *wbc,
3677                              writepage_t writepage, void *data,
3678                              void (*flush_fn)(void *))
3679 {
3680         struct inode *inode = mapping->host;
3681         int ret = 0;
3682         int done = 0;
3683         int nr_to_write_done = 0;
3684         struct pagevec pvec;
3685         int nr_pages;
3686         pgoff_t index;
3687         pgoff_t end;            /* Inclusive */
3688         int scanned = 0;
3689         int tag;
3690
3691         /*
3692          * We have to hold onto the inode so that ordered extents can do their
3693          * work when the IO finishes.  The alternative is failing to add an
3694          * ordered extent if the igrab() fails there, and that is a huge pain
3695          * to deal with, so instead just hold onto the inode throughout the
3696          * writepages operation.  If igrab() fails here the inode is being freed
3697          * anyway and we'd rather not waste our time writing out data that is
3698          * about to be truncated.
3699          */
3700         if (!igrab(inode))
3701                 return 0;
3702
3703         pagevec_init(&pvec, 0);
3704         if (wbc->range_cyclic) {
3705                 index = mapping->writeback_index; /* Start from prev offset */
3706                 end = -1;
3707         } else {
3708                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3709                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3710                 scanned = 1;
3711         }
3712         if (wbc->sync_mode == WB_SYNC_ALL)
3713                 tag = PAGECACHE_TAG_TOWRITE;
3714         else
3715                 tag = PAGECACHE_TAG_DIRTY;
3716 retry:
3717         if (wbc->sync_mode == WB_SYNC_ALL)
3718                 tag_pages_for_writeback(mapping, index, end);
3719         while (!done && !nr_to_write_done && (index <= end) &&
3720                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3721                         min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3722                 unsigned i;
3723
3724                 scanned = 1;
3725                 for (i = 0; i < nr_pages; i++) {
3726                         struct page *page = pvec.pages[i];
3727
3728                         /*
3729                          * At this point we hold neither mapping->tree_lock nor
3730                          * lock on the page itself: the page may be truncated or
3731                          * invalidated (changing page->mapping to NULL), or even
3732                          * swizzled back from swapper_space to tmpfs file
3733                          * mapping
3734                          */
3735                         if (!trylock_page(page)) {
3736                                 flush_fn(data);
3737                                 lock_page(page);
3738                         }
3739
3740                         if (unlikely(page->mapping != mapping)) {
3741                                 unlock_page(page);
3742                                 continue;
3743                         }
3744
3745                         if (!wbc->range_cyclic && page->index > end) {
3746                                 done = 1;
3747                                 unlock_page(page);
3748                                 continue;
3749                         }
3750
3751                         if (wbc->sync_mode != WB_SYNC_NONE) {
3752                                 if (PageWriteback(page))
3753                                         flush_fn(data);
3754                                 wait_on_page_writeback(page);
3755                         }
3756
3757                         if (PageWriteback(page) ||
3758                             !clear_page_dirty_for_io(page)) {
3759                                 unlock_page(page);
3760                                 continue;
3761                         }
3762
3763                         ret = (*writepage)(page, wbc, data);
3764
3765                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3766                                 unlock_page(page);
3767                                 ret = 0;
3768                         }
3769                         if (ret)
3770                                 done = 1;
3771
3772                         /*
3773                          * the filesystem may choose to bump up nr_to_write.
3774                          * We have to make sure to honor the new nr_to_write
3775                          * at any time
3776                          */
3777                         nr_to_write_done = wbc->nr_to_write <= 0;
3778                 }
3779                 pagevec_release(&pvec);
3780                 cond_resched();
3781         }
3782         if (!scanned && !done) {
3783                 /*
3784                  * We hit the last page and there is more work to be done: wrap
3785                  * back to the start of the file
3786                  */
3787                 scanned = 1;
3788                 index = 0;
3789                 goto retry;
3790         }
3791         btrfs_add_delayed_iput(inode);
3792         return ret;
3793 }
3794
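/*
 * Submit any bio that was built up in the extent_page_data, using
 * WRITE_SYNC when the caller asked for synchronous IO.
 */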
3795 static void flush_epd_write_bio(struct extent_page_data *epd)
3796 {
3797         if (epd->bio) {
3798                 int rw = WRITE;
3799                 int ret;
3800
3801                 if (epd->sync_io)
3802                         rw = WRITE_SYNC;
3803
3804                 ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
3805                 BUG_ON(ret < 0); /* -ENOMEM */
3806                 epd->bio = NULL;
3807         }
3808 }
3809
3810 static noinline void flush_write_bio(void *data)
3811 {
3812         struct extent_page_data *epd = data;
3813         flush_epd_write_bio(epd);
3814 }
3815
3816 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3817                           get_extent_t *get_extent,
3818                           struct writeback_control *wbc)
3819 {
3820         int ret;
3821         struct extent_page_data epd = {
3822                 .bio = NULL,
3823                 .tree = tree,
3824                 .get_extent = get_extent,
3825                 .extent_locked = 0,
3826                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3827                 .bio_flags = 0,
3828         };
3829
3830         ret = __extent_writepage(page, wbc, &epd);
3831
3832         flush_epd_write_bio(&epd);
3833         return ret;
3834 }
3835
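/*
 * Write out a range of pages whose extent range is already locked by the
 * caller (epd.extent_locked is set, so the delalloc fill step is skipped).
 * Pages that are no longer dirty only get their writepage_end_io_hook
 * called and are unlocked.
 */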
3836 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3837                               u64 start, u64 end, get_extent_t *get_extent,
3838                               int mode)
3839 {
3840         int ret = 0;
3841         struct address_space *mapping = inode->i_mapping;
3842         struct page *page;
3843         unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3844                 PAGE_CACHE_SHIFT;
3845
3846         struct extent_page_data epd = {
3847                 .bio = NULL,
3848                 .tree = tree,
3849                 .get_extent = get_extent,
3850                 .extent_locked = 1,
3851                 .sync_io = mode == WB_SYNC_ALL,
3852                 .bio_flags = 0,
3853         };
3854         struct writeback_control wbc_writepages = {
3855                 .sync_mode      = mode,
3856                 .nr_to_write    = nr_pages * 2,
3857                 .range_start    = start,
3858                 .range_end      = end + 1,
3859         };
3860
3861         while (start <= end) {
3862                 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3863                 if (clear_page_dirty_for_io(page))
3864                         ret = __extent_writepage(page, &wbc_writepages, &epd);
3865                 else {
3866                         if (tree->ops && tree->ops->writepage_end_io_hook)
3867                                 tree->ops->writepage_end_io_hook(page, start,
3868                                                  start + PAGE_CACHE_SIZE - 1,
3869                                                  NULL, 1);
3870                         unlock_page(page);
3871                 }
3872                 page_cache_release(page);
3873                 start += PAGE_CACHE_SIZE;
3874         }
3875
3876         flush_epd_write_bio(&epd);
3877         return ret;
3878 }
3879
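/*
 * writepages entry point: write the dirty pages of the mapping with
 * __extent_writepage() and submit whatever bio is left over at the end.
 */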
3880 int extent_writepages(struct extent_io_tree *tree,
3881                       struct address_space *mapping,
3882                       get_extent_t *get_extent,
3883                       struct writeback_control *wbc)
3884 {
3885         int ret = 0;
3886         struct extent_page_data epd = {
3887                 .bio = NULL,
3888                 .tree = tree,
3889                 .get_extent = get_extent,
3890                 .extent_locked = 0,
3891                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3892                 .bio_flags = 0,
3893         };
3894
3895         ret = extent_write_cache_pages(tree, mapping, wbc,
3896                                        __extent_writepage, &epd,
3897                                        flush_write_bio);
3898         flush_epd_write_bio(&epd);
3899         return ret;
3900 }
3901
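/*
 * readpages entry point: add the pages to the page cache and read them in
 * batches of up to 16 (the size of the local pagepool), reusing a cached
 * extent map across each batch.
 */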
3902 int extent_readpages(struct extent_io_tree *tree,
3903                      struct address_space *mapping,
3904                      struct list_head *pages, unsigned nr_pages,
3905                      get_extent_t get_extent)
3906 {
3907         struct bio *bio = NULL;
3908         unsigned page_idx;
3909         unsigned long bio_flags = 0;
3910         struct page *pagepool[16];
3911         struct page *page;
3912         struct extent_map *em_cached = NULL;
3913         int nr = 0;
3914
3915         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3916                 page = list_entry(pages->prev, struct page, lru);
3917
3918                 prefetchw(&page->flags);
3919                 list_del(&page->lru);
3920                 if (add_to_page_cache_lru(page, mapping,
3921                                         page->index, GFP_NOFS)) {
3922                         page_cache_release(page);
3923                         continue;
3924                 }
3925
3926                 pagepool[nr++] = page;
3927                 if (nr < ARRAY_SIZE(pagepool))
3928                         continue;
3929                 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
3930                                    &bio, 0, &bio_flags, READ);
3931                 nr = 0;
3932         }
3933         if (nr)
3934                 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
3935                                    &bio, 0, &bio_flags, READ);
3936
3937         if (em_cached)
3938                 free_extent_map(em_cached);
3939
3940         BUG_ON(!list_empty(pages));
3941         if (bio)
3942                 return submit_one_bio(READ, bio, 0, bio_flags);
3943         return 0;
3944 }
3945
3946 /*
3947  * basic invalidatepage code.  This waits on any locked or writeback
3948  * ranges corresponding to the page, and then deletes any extent state
3949  * records from the tree.
3950  */
3951 int extent_invalidatepage(struct extent_io_tree *tree,
3952                           struct page *page, unsigned long offset)
3953 {
3954         struct extent_state *cached_state = NULL;
3955         u64 start = page_offset(page);
3956         u64 end = start + PAGE_CACHE_SIZE - 1;
3957         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3958
3959         start += ALIGN(offset, blocksize);
3960         if (start > end)
3961                 return 0;
3962
3963         lock_extent_bits(tree, start, end, 0, &cached_state);
3964         wait_on_page_writeback(page);
3965         clear_extent_bit(tree, start, end,
3966                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3967                          EXTENT_DO_ACCOUNTING,
3968                          1, 1, &cached_state, GFP_NOFS);
3969         return 0;
3970 }
3971
3972 /*
3973  * a helper for releasepage.  This tests for areas of the page that
3974  * are locked or under IO and drops the related state bits if it is safe
3975  * to drop the page.
3976  */
3977 static int try_release_extent_state(struct extent_map_tree *map,
3978                                     struct extent_io_tree *tree,
3979                                     struct page *page, gfp_t mask)
3980 {
3981         u64 start = page_offset(page);
3982         u64 end = start + PAGE_CACHE_SIZE - 1;
3983         int ret = 1;
3984
3985         if (test_range_bit(tree, start, end,
3986                            EXTENT_IOBITS, 0, NULL))
3987                 ret = 0;
3988         else {
3989                 if ((mask & GFP_NOFS) == GFP_NOFS)
3990                         mask = GFP_NOFS;
3991                 /*
3992                  * at this point we can safely clear everything except the
3993                  * locked bit and the nodatasum bit
3994                  */
3995                 ret = clear_extent_bit(tree, start, end,
3996                                  ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3997                                  0, 0, NULL, mask);
3998
3999                 /* if clear_extent_bit failed for enomem reasons,
4000                  * we can't allow the release to continue.
4001                  */
4002                 if (ret < 0)
4003                         ret = 0;
4004                 else
4005                         ret = 1;
4006         }
4007         return ret;
4008 }
4009
4010 /*
4011  * a helper for releasepage.  As long as there are no locked extents
4012  * in the range corresponding to the page, both state records and extent
4013  * map records are removed
4014  */
4015 int try_release_extent_mapping(struct extent_map_tree *map,
4016                                struct extent_io_tree *tree, struct page *page,
4017                                gfp_t mask)
4018 {
4019         struct extent_map *em;
4020         u64 start = page_offset(page);
4021         u64 end = start + PAGE_CACHE_SIZE - 1;
4022
4023         if ((mask & __GFP_WAIT) &&
4024             page->mapping->host->i_size > 16 * 1024 * 1024) {
4025                 u64 len;
4026                 while (start <= end) {
4027                         len = end - start + 1;
4028                         write_lock(&map->lock);
4029                         em = lookup_extent_mapping(map, start, len);
4030                         if (!em) {
4031                                 write_unlock(&map->lock);
4032                                 break;
4033                         }
4034                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4035                             em->start != start) {
4036                                 write_unlock(&map->lock);
4037                                 free_extent_map(em);
4038                                 break;
4039                         }
4040                         if (!test_range_bit(tree, em->start,
4041                                             extent_map_end(em) - 1,
4042                                             EXTENT_LOCKED | EXTENT_WRITEBACK,
4043                                             0, NULL)) {
4044                                 remove_extent_mapping(map, em);
4045                                 /* once for the rb tree */
4046                                 free_extent_map(em);
4047                         }
4048                         start = extent_map_end(em);
4049                         write_unlock(&map->lock);
4050
4051                         /* once for us */
4052                         free_extent_map(em);
4053                 }
4054         }
4055         return try_release_extent_state(map, tree, page, mask);
4056 }
4057
4058 /*
4059  * helper function for fiemap, which doesn't want to see any holes.
4060  * This maps until we find something past 'last'
4061  */
4062 static struct extent_map *get_extent_skip_holes(struct inode *inode,
4063                                                 u64 offset,
4064                                                 u64 last,
4065                                                 get_extent_t *get_extent)
4066 {
4067         u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
4068         struct extent_map *em;
4069         u64 len;
4070
4071         if (offset >= last)
4072                 return NULL;
4073
4074         while (1) {
4075                 len = last - offset;
4076                 if (len == 0)
4077                         break;
4078                 len = ALIGN(len, sectorsize);
4079                 em = get_extent(inode, NULL, 0, offset, len, 0);
4080                 if (IS_ERR_OR_NULL(em))
4081                         return em;
4082
4083                 /* if this isn't a hole return it */
4084                 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
4085                     em->block_start != EXTENT_MAP_HOLE) {
4086                         return em;
4087                 }
4088
4089                 /* this is a hole, advance to the next extent */
4090                 offset = extent_map_end(em);
4091                 free_extent_map(em);
4092                 if (offset >= last)
4093                         break;
4094         }
4095         return NULL;
4096 }
4097
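/*
 * Callback passed to iterate_inodes_from_logical() below.  It counts the
 * references to an extent and returns 1 as soon as more than one has been
 * seen, which means the extent is shared.
 */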
4098 static noinline int count_ext_ref(u64 inum, u64 offset, u64 root_id, void *ctx)
4099 {
4100         unsigned long cnt = *((unsigned long *)ctx);
4101
4102         cnt++;
4103         *((unsigned long *)ctx) = cnt;
4104
4105         /* Now we're sure that the extent is shared. */
4106         if (cnt > 1)
4107                 return 1;
4108         return 0;
4109 }
4110
4111 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4112                 __u64 start, __u64 len, get_extent_t *get_extent)
4113 {
4114         int ret = 0;
4115         u64 off = start;
4116         u64 max = start + len;
4117         u32 flags = 0;
4118         u32 found_type;
4119         u64 last;
4120         u64 last_for_get_extent = 0;
4121         u64 disko = 0;
4122         u64 isize = i_size_read(inode);
4123         struct btrfs_key found_key;
4124         struct extent_map *em = NULL;
4125         struct extent_state *cached_state = NULL;
4126         struct btrfs_path *path;
4127         int end = 0;
4128         u64 em_start = 0;
4129         u64 em_len = 0;
4130         u64 em_end = 0;
4131
4132         if (len == 0)
4133                 return -EINVAL;
4134
4135         path = btrfs_alloc_path();
4136         if (!path)
4137                 return -ENOMEM;
4138         path->leave_spinning = 1;
4139
4140         start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
4141         len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
4142
4143         /*
4144          * lookup the last file extent.  We're not using i_size here
4145          * because there might be preallocation past i_size
4146          */
4147         ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
4148                                        path, btrfs_ino(inode), -1, 0);
4149         if (ret < 0) {
4150                 btrfs_free_path(path);
4151                 return ret;
4152         }
4153         WARN_ON(!ret);
4154         path->slots[0]--;
4155         btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4156         found_type = btrfs_key_type(&found_key);
4157
4158         /* No extents, but there might be delalloc bits */
4159         if (found_key.objectid != btrfs_ino(inode) ||
4160             found_type != BTRFS_EXTENT_DATA_KEY) {
4161                 /* have to trust i_size as the end */
4162                 last = (u64)-1;
4163                 last_for_get_extent = isize;
4164         } else {
4165                 /*
4166                  * remember the start of the last extent.  There are a
4167                  * bunch of different factors that go into the length of the
4168                  * extent, so it's much less complex to remember where it started
4169                  */
4170                 last = found_key.offset;
4171                 last_for_get_extent = last + 1;
4172         }
4173         btrfs_release_path(path);
4174
4175         /*
4176          * we might have some extents allocated but more delalloc past those
4177          * extents.  So we trust isize unless the start of the last extent is
4178          * beyond isize.
4179          */
4180         if (last < isize) {
4181                 last = (u64)-1;
4182                 last_for_get_extent = isize;
4183         }
4184
4185         lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
4186                          &cached_state);
4187
4188         em = get_extent_skip_holes(inode, start, last_for_get_extent,
4189                                    get_extent);
4190         if (!em)
4191                 goto out;
4192         if (IS_ERR(em)) {
4193                 ret = PTR_ERR(em);
4194                 goto out;
4195         }
4196
4197         while (!end) {
4198                 u64 offset_in_extent = 0;
4199
4200                 /* break if the extent we found is outside the range */
4201                 if (em->start >= max || extent_map_end(em) < off)
4202                         break;
4203
4204                 /*
4205                  * get_extent may return an extent that starts before our
4206                  * requested range.  We have to make sure the ranges
4207                  * we return to fiemap always move forward and don't
4208                  * overlap, so adjust the offsets here
4209                  */
4210                 em_start = max(em->start, off);
4211
4212                 /*
4213                  * Record the offset from the start of the extent
4214                  * for adjusting the disk offset below.  Only do this if the
4215                  * extent isn't compressed, since our in-memory offset may be past
4216                  * what we have actually allocated on disk.
4217                  */
4218                 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4219                         offset_in_extent = em_start - em->start;
4220                 em_end = extent_map_end(em);
4221                 em_len = em_end - em_start;
4222                 disko = 0;
4223                 flags = 0;
4224
4225                 /*
4226                  * bump off for our next call to get_extent
4227                  */
4228                 off = extent_map_end(em);
4229                 if (off >= max)
4230                         end = 1;
4231
4232                 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4233                         end = 1;
4234                         flags |= FIEMAP_EXTENT_LAST;
4235                 } else if (em->block_start == EXTENT_MAP_INLINE) {
4236                         flags |= (FIEMAP_EXTENT_DATA_INLINE |
4237                                   FIEMAP_EXTENT_NOT_ALIGNED);
4238                 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
4239                         flags |= (FIEMAP_EXTENT_DELALLOC |
4240                                   FIEMAP_EXTENT_UNKNOWN);
4241                 } else {
4242                         unsigned long ref_cnt = 0;
4243
4244                         disko = em->block_start + offset_in_extent;
4245
4246                         /*
4247                          * As btrfs supports shared extents (the same data can
4248                          * be referenced from more than one inode), export this
4249                          * information to userspace tools via FIEMAP_EXTENT_SHARED.
4250                          */
4251                         ret = iterate_inodes_from_logical(
4252                                         em->block_start,
4253                                         BTRFS_I(inode)->root->fs_info,
4254                                         path, count_ext_ref, &ref_cnt);
4255                         if (ret < 0 && ret != -ENOENT)
4256                                 goto out_free;
4257
4258                         if (ref_cnt > 1)
4259                                 flags |= FIEMAP_EXTENT_SHARED;
4260                 }
4261                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4262                         flags |= FIEMAP_EXTENT_ENCODED;
4263
4264                 free_extent_map(em);
4265                 em = NULL;
4266                 if ((em_start >= last) || em_len == (u64)-1 ||
4267                    (last == (u64)-1 && isize <= em_end)) {
4268                         flags |= FIEMAP_EXTENT_LAST;
4269                         end = 1;
4270                 }
4271
4272                 /* now scan forward to see if this is really the last extent. */
4273                 em = get_extent_skip_holes(inode, off, last_for_get_extent,
4274                                            get_extent);
4275                 if (IS_ERR(em)) {
4276                         ret = PTR_ERR(em);
4277                         goto out;
4278                 }
4279                 if (!em) {
4280                         flags |= FIEMAP_EXTENT_LAST;
4281                         end = 1;
4282                 }
4283                 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
4284                                               em_len, flags);
4285                 if (ret)
4286                         goto out_free;
4287         }
4288 out_free:
4289         free_extent_map(em);
4290 out:
4291         btrfs_free_path(path);
4292         unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4293                              &cached_state, GFP_NOFS);
4294         return ret;
4295 }
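
/*
 * Minimal sketch of a caller: the fiemap ioctl handler just forwards to
 * extent_fiemap() with the filesystem's get_extent callback.  The names
 * used below (btrfs_get_extent_fiemap, BTRFS_FIEMAP_FLAGS) live outside
 * this file and are assumed here, not defined:
 *
 *	static int example_fiemap(struct inode *inode,
 *				  struct fiemap_extent_info *fieinfo,
 *				  __u64 start, __u64 len)
 *	{
 *		int ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
 *
 *		if (ret)
 *			return ret;
 *		return extent_fiemap(inode, fieinfo, start, len,
 *				     btrfs_get_extent_fiemap);
 *	}
 */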
4296
4297 static void __free_extent_buffer(struct extent_buffer *eb)
4298 {
4299         btrfs_leak_debug_del(&eb->leak_list);
4300         kmem_cache_free(extent_buffer_cache, eb);
4301 }
4302
4303 static int extent_buffer_under_io(struct extent_buffer *eb)
4304 {
4305         return (atomic_read(&eb->io_pages) ||
4306                 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4307                 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4308 }
4309
4310 /*
4311  * Helper for releasing the extent buffer's pages.
4312  */
4313 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
4314                                                 unsigned long start_idx)
4315 {
4316         unsigned long index;
4317         unsigned long num_pages;
4318         struct page *page;
4319         int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4320
4321         BUG_ON(extent_buffer_under_io(eb));
4322
4323         num_pages = num_extent_pages(eb->start, eb->len);
4324         index = start_idx + num_pages;
4325         if (start_idx >= index)
4326                 return;
4327
4328         do {
4329                 index--;
4330                 page = extent_buffer_page(eb, index);
4331                 if (page && mapped) {
4332                         spin_lock(&page->mapping->private_lock);
4333                         /*
4334                          * We do this since we'll remove the pages after we've
4335                          * removed the eb from the radix tree, so we could race
4336                          * and have this page now attached to a new eb.  So
4337                          * only clear page_private if it's still connected to
4338                          * this eb.
4339                          */
4340                         if (PagePrivate(page) &&
4341                             page->private == (unsigned long)eb) {
4342                                 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4343                                 BUG_ON(PageDirty(page));
4344                                 BUG_ON(PageWriteback(page));
4345                                 /*
4346                                  * We need to make sure we haven't been attached
4347                                  * to a new eb.
4348                                  */
4349                                 ClearPagePrivate(page);
4350                                 set_page_private(page, 0);
4351                                 /* One for the page private */
4352                                 page_cache_release(page);
4353                         }
4354                         spin_unlock(&page->mapping->private_lock);
4355
4356                 }
4357                 if (page) {
4358                         /* One for when we allocated the page */
4359                         page_cache_release(page);
4360                 }
4361         } while (index != start_idx);
4362 }
4363
4364 /*
4365  * Helper for releasing the extent buffer.
4366  */
4367 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4368 {
4369         btrfs_release_extent_buffer_page(eb, 0);
4370         __free_extent_buffer(eb);
4371 }
4372
4373 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
4374                                                    u64 start,
4375                                                    unsigned long len,
4376                                                    gfp_t mask)
4377 {
4378         struct extent_buffer *eb = NULL;
4379
4380         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
4381         if (eb == NULL)
4382                 return NULL;
4383         eb->start = start;
4384         eb->len = len;
4385         eb->tree = tree;
4386         eb->bflags = 0;
4387         rwlock_init(&eb->lock);
4388         atomic_set(&eb->write_locks, 0);
4389         atomic_set(&eb->read_locks, 0);
4390         atomic_set(&eb->blocking_readers, 0);
4391         atomic_set(&eb->blocking_writers, 0);
4392         atomic_set(&eb->spinning_readers, 0);
4393         atomic_set(&eb->spinning_writers, 0);
4394         eb->lock_nested = 0;
4395         init_waitqueue_head(&eb->write_lock_wq);
4396         init_waitqueue_head(&eb->read_lock_wq);
4397
4398         btrfs_leak_debug_add(&eb->leak_list, &buffers);
4399
4400         spin_lock_init(&eb->refs_lock);
4401         atomic_set(&eb->refs, 1);
4402         atomic_set(&eb->io_pages, 0);
4403
4404         /*
4405          * Sanity checks: currently the maximum is 64k covered by 16x 4k pages.
4406          */
4407         BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4408                 > MAX_INLINE_EXTENT_BUFFER_SIZE);
4409         BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4410
4411         return eb;
4412 }
4413
4414 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4415 {
4416         unsigned long i;
4417         struct page *p;
4418         struct extent_buffer *new;
4419         unsigned long num_pages = num_extent_pages(src->start, src->len);
4420
4421         new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS);
4422         if (new == NULL)
4423                 return NULL;
4424
4425         for (i = 0; i < num_pages; i++) {
4426                 p = alloc_page(GFP_NOFS);
4427                 if (!p) {
4428                         btrfs_release_extent_buffer(new);
4429                         return NULL;
4430                 }
4431                 attach_extent_buffer_page(new, p);
4432                 WARN_ON(PageDirty(p));
4433                 SetPageUptodate(p);
4434                 new->pages[i] = p;
4435         }
4436
4437         copy_extent_buffer(new, src, 0, 0, src->len);
4438         set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4439         set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4440
4441         return new;
4442 }
4443
4444 struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
4445 {
4446         struct extent_buffer *eb;
4447         unsigned long num_pages = num_extent_pages(0, len);
4448         unsigned long i;
4449
4450         eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS);
4451         if (!eb)
4452                 return NULL;
4453
4454         for (i = 0; i < num_pages; i++) {
4455                 eb->pages[i] = alloc_page(GFP_NOFS);
4456                 if (!eb->pages[i])
4457                         goto err;
4458         }
4459         set_extent_buffer_uptodate(eb);
4460         btrfs_set_header_nritems(eb, 0);
4461         set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4462
4463         return eb;
4464 err:
4465         for (; i > 0; i--)
4466                 __free_page(eb->pages[i - 1]);
4467         __free_extent_buffer(eb);
4468         return NULL;
4469 }
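
/*
 * A dummy extent buffer is not inserted into any radix tree and its pages
 * are not attached to an inode mapping, which makes it useful as scratch
 * metadata space.  Hypothetical sketch of the lifecycle (4096 is just an
 * example nodesize):
 *
 *	struct extent_buffer *eb;
 *
 *	eb = alloc_dummy_extent_buffer(0, 4096);
 *	if (!eb)
 *		return -ENOMEM;
 *	memset_extent_buffer(eb, 0, 0, 4096);
 *	...
 *	free_extent_buffer(eb);
 */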
4470
4471 static void check_buffer_tree_ref(struct extent_buffer *eb)
4472 {
4473         int refs;
4474         /* The ref bit is tricky.  We have to make sure it is set
4475          * whenever the buffer is dirty.  Otherwise the
4476          * code to free a buffer can end up dropping a dirty
4477          * page.
4478          *
4479          * Once the ref bit is set, it won't go away while the
4480          * buffer is dirty or in writeback, and it also won't
4481          * go away while we have the reference count on the
4482          * eb bumped.
4483          *
4484          * We can't just set the ref bit without bumping the
4485          * ref on the eb because free_extent_buffer might
4486          * see the ref bit and try to clear it.  If this happens
4487          * free_extent_buffer might end up dropping our original
4488          * ref by mistake and freeing the page before we are able
4489          * to add one more ref.
4490          *
4491          * So bump the ref count first, then set the bit.  If someone
4492          * beat us to it, drop the ref we added.
4493          */
4494         refs = atomic_read(&eb->refs);
4495         if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4496                 return;
4497
4498         spin_lock(&eb->refs_lock);
4499         if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4500                 atomic_inc(&eb->refs);
4501         spin_unlock(&eb->refs_lock);
4502 }
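
/*
 * Illustrative interleaving of the race described above if the bit were
 * set before taking the extra reference (hypothetical ordering):
 *
 *	setter of the tree ref			concurrent free_extent_buffer()
 *	------------------------------		-------------------------------
 *	set_bit(EXTENT_BUFFER_TREE_REF)
 *						sees TREE_REF, clears it and
 *						drops what it believes is the
 *						tree's reference; eb is freed
 *	atomic_inc(&eb->refs)			use after free
 *
 * Bumping the reference first and using test_and_set_bit() under
 * refs_lock closes that window.
 */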
4503
4504 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4505 {
4506         unsigned long num_pages, i;
4507
4508         check_buffer_tree_ref(eb);
4509
4510         num_pages = num_extent_pages(eb->start, eb->len);
4511         for (i = 0; i < num_pages; i++) {
4512                 struct page *p = extent_buffer_page(eb, i);
4513                 mark_page_accessed(p);
4514         }
4515 }
4516
4517 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
4518                                                         u64 start)
4519 {
4520         struct extent_buffer *eb;
4521
4522         rcu_read_lock();
4523         eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4524         if (eb && atomic_inc_not_zero(&eb->refs)) {
4525                 rcu_read_unlock();
4526                 mark_extent_buffer_accessed(eb);
4527                 return eb;
4528         }
4529         rcu_read_unlock();
4530
4531         return NULL;
4532 }
4533
4534 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
4535                                           u64 start, unsigned long len)
4536 {
4537         unsigned long num_pages = num_extent_pages(start, len);
4538         unsigned long i;
4539         unsigned long index = start >> PAGE_CACHE_SHIFT;
4540         struct extent_buffer *eb;
4541         struct extent_buffer *exists = NULL;
4542         struct page *p;
4543         struct address_space *mapping = tree->mapping;
4544         int uptodate = 1;
4545         int ret;
4546
4547
4548         eb = find_extent_buffer(tree, start);
4549         if (eb)
4550                 return eb;
4551
4552         eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
4553         if (!eb)
4554                 return NULL;
4555
4556         for (i = 0; i < num_pages; i++, index++) {
4557                 p = find_or_create_page(mapping, index, GFP_NOFS);
4558                 if (!p)
4559                         goto free_eb;
4560
4561                 spin_lock(&mapping->private_lock);
4562                 if (PagePrivate(p)) {
4563                         /*
4564                          * We could have already allocated an eb for this page
4565                          * and attached one, so let's see if we can get a ref on
4566                          * the existing eb.  If we can, we know it's good and
4567                          * we can just return that one; otherwise we can safely
4568                          * overwrite page->private.
4569                          */
4570                         exists = (struct extent_buffer *)p->private;
4571                         if (atomic_inc_not_zero(&exists->refs)) {
4572                                 spin_unlock(&mapping->private_lock);
4573                                 unlock_page(p);
4574                                 page_cache_release(p);
4575                                 mark_extent_buffer_accessed(exists);
4576                                 goto free_eb;
4577                         }
4578
4579                         /*
4580                          * Do this so attach doesn't complain, and drop the
4581                          * ref the old eb held on this page.
4582                          */
4583                         ClearPagePrivate(p);
4584                         WARN_ON(PageDirty(p));
4585                         page_cache_release(p);
4586                 }
4587                 attach_extent_buffer_page(eb, p);
4588                 spin_unlock(&mapping->private_lock);
4589                 WARN_ON(PageDirty(p));
4590                 mark_page_accessed(p);
4591                 eb->pages[i] = p;
4592                 if (!PageUptodate(p))
4593                         uptodate = 0;
4594
4595                 /*
4596                  * see below about how we avoid a nasty race with release page
4597                  * and why we unlock later
4598                  */
4599         }
4600         if (uptodate)
4601                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4602 again:
4603         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4604         if (ret)
4605                 goto free_eb;
4606
4607         spin_lock(&tree->buffer_lock);
4608         ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
4609         spin_unlock(&tree->buffer_lock);
4610         radix_tree_preload_end();
4611         if (ret == -EEXIST) {
4612                 exists = find_extent_buffer(tree, start);
4613                 if (exists)
4614                         goto free_eb;
4615                 else
4616                         goto again;
4617         }
4618         /* add one reference for the tree */
4619         check_buffer_tree_ref(eb);
4620
4621         /*
4622          * There is a race where releasepage may have
4623          * tried to find this extent buffer in the radix tree
4624          * but failed.  It will tell the VM it is safe to
4625          * reclaim the page, and it will clear the page private bit.
4626          * We must make sure to set the page private bit properly
4627          * after the extent buffer is in the radix tree so
4628          * it doesn't get lost.
4629          */
4630         SetPageChecked(eb->pages[0]);
4631         for (i = 1; i < num_pages; i++) {
4632                 p = extent_buffer_page(eb, i);
4633                 ClearPageChecked(p);
4634                 unlock_page(p);
4635         }
4636         unlock_page(eb->pages[0]);
4637         return eb;
4638
4639 free_eb:
4640         for (i = 0; i < num_pages; i++) {
4641                 if (eb->pages[i])
4642                         unlock_page(eb->pages[i]);
4643         }
4644
4645         WARN_ON(!atomic_dec_and_test(&eb->refs));
4646         btrfs_release_extent_buffer(eb);
4647         return exists;
4648 }
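
/*
 * alloc_extent_buffer() is effectively find-or-create: if an eb already
 * covers @start it is returned with an extra reference, otherwise a new
 * one is built and published in the radix tree.  Either way the caller
 * owns one reference and must drop it.  Hedged sketch:
 *
 *	eb = alloc_extent_buffer(tree, bytenr, blocksize);
 *	if (!eb)
 *		return -ENOMEM;
 *	...
 *	free_extent_buffer(eb);
 */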
4649
4650 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4651 {
4652         struct extent_buffer *eb =
4653                         container_of(head, struct extent_buffer, rcu_head);
4654
4655         __free_extent_buffer(eb);
4656 }
4657
4658 /* Expects to have eb->refs_lock already held */
4659 static int release_extent_buffer(struct extent_buffer *eb)
4660 {
4661         WARN_ON(atomic_read(&eb->refs) == 0);
4662         if (atomic_dec_and_test(&eb->refs)) {
4663                 if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
4664                         spin_unlock(&eb->refs_lock);
4665                 } else {
4666                         struct extent_io_tree *tree = eb->tree;
4667
4668                         spin_unlock(&eb->refs_lock);
4669
4670                         spin_lock(&tree->buffer_lock);
4671                         radix_tree_delete(&tree->buffer,
4672                                           eb->start >> PAGE_CACHE_SHIFT);
4673                         spin_unlock(&tree->buffer_lock);
4674                 }
4675
4676                 /* Should be safe to release our pages at this point */
4677                 btrfs_release_extent_buffer_page(eb, 0);
4678                 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4679                 return 1;
4680         }
4681         spin_unlock(&eb->refs_lock);
4682
4683         return 0;
4684 }
4685
4686 void free_extent_buffer(struct extent_buffer *eb)
4687 {
4688         int refs;
4689         int old;
4690         if (!eb)
4691                 return;
4692
4693         while (1) {
4694                 refs = atomic_read(&eb->refs);
4695                 if (refs <= 3)
4696                         break;
4697                 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
4698                 if (old == refs)
4699                         return;
4700         }
4701
4702         spin_lock(&eb->refs_lock);
4703         if (atomic_read(&eb->refs) == 2 &&
4704             test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
4705                 atomic_dec(&eb->refs);
4706
4707         if (atomic_read(&eb->refs) == 2 &&
4708             test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4709             !extent_buffer_under_io(eb) &&
4710             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4711                 atomic_dec(&eb->refs);
4712
4713         /*
4714          * I know this is terrible, but it's temporary until we stop tracking
4715          * the uptodate bits and such for the extent buffers.
4716          */
4717         release_extent_buffer(eb);
4718 }
4719
4720 void free_extent_buffer_stale(struct extent_buffer *eb)
4721 {
4722         if (!eb)
4723                 return;
4724
4725         spin_lock(&eb->refs_lock);
4726         set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4727
4728         if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4729             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4730                 atomic_dec(&eb->refs);
4731         release_extent_buffer(eb);
4732 }
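
/*
 * Worked example of the reference transitions (single-threaded view,
 * illustrative only):
 *
 *	alloc_extent_buffer()		refs == 2  (tree ref + caller ref)
 *	free_extent_buffer()		refs == 1  (tree ref keeps the eb
 *					cached in the radix tree)
 *
 * If the caller instead knows the block is going away (e.g. it was freed
 * on disk), it marks the buffer stale while still holding its reference:
 *
 *	alloc_extent_buffer()		refs == 2  (tree ref + caller ref)
 *	free_extent_buffer_stale()	drops the tree ref and the caller
 *					ref; the eb is released once no
 *					I/O is outstanding
 */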
4733
4734 void clear_extent_buffer_dirty(struct extent_buffer *eb)
4735 {
4736         unsigned long i;
4737         unsigned long num_pages;
4738         struct page *page;
4739
4740         num_pages = num_extent_pages(eb->start, eb->len);
4741
4742         for (i = 0; i < num_pages; i++) {
4743                 page = extent_buffer_page(eb, i);
4744                 if (!PageDirty(page))
4745                         continue;
4746
4747                 lock_page(page);
4748                 WARN_ON(!PagePrivate(page));
4749
4750                 clear_page_dirty_for_io(page);
4751                 spin_lock_irq(&page->mapping->tree_lock);
4752                 if (!PageDirty(page)) {
4753                         radix_tree_tag_clear(&page->mapping->page_tree,
4754                                                 page_index(page),
4755                                                 PAGECACHE_TAG_DIRTY);
4756                 }
4757                 spin_unlock_irq(&page->mapping->tree_lock);
4758                 ClearPageError(page);
4759                 unlock_page(page);
4760         }
4761         WARN_ON(atomic_read(&eb->refs) == 0);
4762 }
4763
4764 int set_extent_buffer_dirty(struct extent_buffer *eb)
4765 {
4766         unsigned long i;
4767         unsigned long num_pages;
4768         int was_dirty = 0;
4769
4770         check_buffer_tree_ref(eb);
4771
4772         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4773
4774         num_pages = num_extent_pages(eb->start, eb->len);
4775         WARN_ON(atomic_read(&eb->refs) == 0);
4776         WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4777
4778         for (i = 0; i < num_pages; i++)
4779                 set_page_dirty(extent_buffer_page(eb, i));
4780         return was_dirty;
4781 }
4782
4783 int clear_extent_buffer_uptodate(struct extent_buffer *eb)
4784 {
4785         unsigned long i;
4786         struct page *page;
4787         unsigned long num_pages;
4788
4789         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4790         num_pages = num_extent_pages(eb->start, eb->len);
4791         for (i = 0; i < num_pages; i++) {
4792                 page = extent_buffer_page(eb, i);
4793                 if (page)
4794                         ClearPageUptodate(page);
4795         }
4796         return 0;
4797 }
4798
4799 int set_extent_buffer_uptodate(struct extent_buffer *eb)
4800 {
4801         unsigned long i;
4802         struct page *page;
4803         unsigned long num_pages;
4804
4805         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4806         num_pages = num_extent_pages(eb->start, eb->len);
4807         for (i = 0; i < num_pages; i++) {
4808                 page = extent_buffer_page(eb, i);
4809                 SetPageUptodate(page);
4810         }
4811         return 0;
4812 }
4813
4814 int extent_buffer_uptodate(struct extent_buffer *eb)
4815 {
4816         return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4817 }
4818
4819 int read_extent_buffer_pages(struct extent_io_tree *tree,
4820                              struct extent_buffer *eb, u64 start, int wait,
4821                              get_extent_t *get_extent, int mirror_num)
4822 {
4823         unsigned long i;
4824         unsigned long start_i;
4825         struct page *page;
4826         int err;
4827         int ret = 0;
4828         int locked_pages = 0;
4829         int all_uptodate = 1;
4830         unsigned long num_pages;
4831         unsigned long num_reads = 0;
4832         struct bio *bio = NULL;
4833         unsigned long bio_flags = 0;
4834
4835         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4836                 return 0;
4837
4838         if (start) {
4839                 WARN_ON(start < eb->start);
4840                 start_i = (start >> PAGE_CACHE_SHIFT) -
4841                         (eb->start >> PAGE_CACHE_SHIFT);
4842         } else {
4843                 start_i = 0;
4844         }
4845
4846         num_pages = num_extent_pages(eb->start, eb->len);
4847         for (i = start_i; i < num_pages; i++) {
4848                 page = extent_buffer_page(eb, i);
4849                 if (wait == WAIT_NONE) {
4850                         if (!trylock_page(page))
4851                                 goto unlock_exit;
4852                 } else {
4853                         lock_page(page);
4854                 }
4855                 locked_pages++;
4856                 if (!PageUptodate(page)) {
4857                         num_reads++;
4858                         all_uptodate = 0;
4859                 }
4860         }
4861         if (all_uptodate) {
4862                 if (start_i == 0)
4863                         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4864                 goto unlock_exit;
4865         }
4866
4867         clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
4868         eb->read_mirror = 0;
4869         atomic_set(&eb->io_pages, num_reads);
4870         for (i = start_i; i < num_pages; i++) {
4871                 page = extent_buffer_page(eb, i);
4872                 if (!PageUptodate(page)) {
4873                         ClearPageError(page);
4874                         err = __extent_read_full_page(tree, page,
4875                                                       get_extent, &bio,
4876                                                       mirror_num, &bio_flags,
4877                                                       READ | REQ_META);
4878                         if (err)
4879                                 ret = err;
4880                 } else {
4881                         unlock_page(page);
4882                 }
4883         }
4884
4885         if (bio) {
4886                 err = submit_one_bio(READ | REQ_META, bio, mirror_num,
4887                                      bio_flags);
4888                 if (err)
4889                         return err;
4890         }
4891
4892         if (ret || wait != WAIT_COMPLETE)
4893                 return ret;
4894
4895         for (i = start_i; i < num_pages; i++) {
4896                 page = extent_buffer_page(eb, i);
4897                 wait_on_page_locked(page);
4898                 if (!PageUptodate(page))
4899                         ret = -EIO;
4900         }
4901
4902         return ret;
4903
4904 unlock_exit:
4905         i = start_i;
4906         while (locked_pages > 0) {
4907                 page = extent_buffer_page(eb, i);
4908                 i++;
4909                 unlock_page(page);
4910                 locked_pages--;
4911         }
4912         return ret;
4913 }
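
/*
 * Sketch of the two usual ways to drive this (the get_extent callback,
 * e.g. btree_get_extent from disk-io.c, is assumed and not defined here):
 *
 *	synchronous read, caller needs the contents now:
 *		ret = read_extent_buffer_pages(tree, eb, 0, WAIT_COMPLETE,
 *					       btree_get_extent, mirror_num);
 *
 *	opportunistic readahead, never block on a locked page:
 *		read_extent_buffer_pages(tree, eb, 0, WAIT_NONE,
 *					 btree_get_extent, 0);
 */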
4914
4915 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4916                         unsigned long start,
4917                         unsigned long len)
4918 {
4919         size_t cur;
4920         size_t offset;
4921         struct page *page;
4922         char *kaddr;
4923         char *dst = (char *)dstv;
4924         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4925         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4926
4927         WARN_ON(start > eb->len);
4928         WARN_ON(start + len > eb->start + eb->len);
4929
4930         offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
4931
4932         while (len > 0) {
4933                 page = extent_buffer_page(eb, i);
4934
4935                 cur = min(len, (PAGE_CACHE_SIZE - offset));
4936                 kaddr = page_address(page);
4937                 memcpy(dst, kaddr + offset, cur);
4938
4939                 dst += cur;
4940                 len -= cur;
4941                 offset = 0;
4942                 i++;
4943         }
4944 }
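
/*
 * Typical use: copy a fixed-size on-disk structure out of the buffer into
 * ordinary memory.  Illustrative only; "key_offset" stands for whatever
 * byte offset the caller computed for the item:
 *
 *	struct btrfs_disk_key disk_key;
 *
 *	read_extent_buffer(eb, &disk_key, key_offset, sizeof(disk_key));
 */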
4945
4946 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
4947                                unsigned long min_len, char **map,
4948                                unsigned long *map_start,
4949                                unsigned long *map_len)
4950 {
4951         size_t offset = start & (PAGE_CACHE_SIZE - 1);
4952         char *kaddr;
4953         struct page *p;
4954         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4955         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4956         unsigned long end_i = (start_offset + start + min_len - 1) >>
4957                 PAGE_CACHE_SHIFT;
4958
4959         if (i != end_i)
4960                 return -EINVAL;
4961
4962         if (i == 0) {
4963                 offset = start_offset;
4964                 *map_start = 0;
4965         } else {
4966                 offset = 0;
4967                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4968         }
4969
4970         if (start + min_len > eb->len) {
4971                 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4972                        "wanted %lu %lu\n",
4973                        eb->start, eb->len, start, min_len);
4974                 return -EINVAL;
4975         }
4976
4977         p = extent_buffer_page(eb, i);
4978         kaddr = page_address(p);
4979         *map = kaddr + offset;
4980         *map_len = PAGE_CACHE_SIZE - offset;
4981         return 0;
4982 }
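
/*
 * This only succeeds when [start, start + min_len) sits inside a single
 * page; callers fall back to the copying helpers otherwise.  Hedged
 * sketch of the usual pattern for reading a little-endian u64 at
 * "offset":
 *
 *	char *kaddr;
 *	unsigned long map_start, map_len;
 *	__le64 leval;
 *	u64 val;
 *
 *	if (!map_private_extent_buffer(eb, offset, sizeof(leval), &kaddr,
 *				       &map_start, &map_len))
 *		memcpy(&leval, kaddr + offset - map_start, sizeof(leval));
 *	else
 *		read_extent_buffer(eb, &leval, offset, sizeof(leval));
 *	val = le64_to_cpu(leval);
 */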
4983
4984 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4985                           unsigned long start,
4986                           unsigned long len)
4987 {
4988         size_t cur;
4989         size_t offset;
4990         struct page *page;
4991         char *kaddr;
4992         char *ptr = (char *)ptrv;
4993         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4994         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4995         int ret = 0;
4996
4997         WARN_ON(start > eb->len);
4998         WARN_ON(start + len > eb->start + eb->len);
4999
5000         offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5001
5002         while (len > 0) {
5003                 page = extent_buffer_page(eb, i);
5004
5005                 cur = min(len, (PAGE_CACHE_SIZE - offset));
5006
5007                 kaddr = page_address(page);
5008                 ret = memcmp(ptr, kaddr + offset, cur);
5009                 if (ret)
5010                         break;
5011
5012                 ptr += cur;
5013                 len -= cur;
5014                 offset = 0;
5015                 i++;
5016         }
5017         return ret;
5018 }
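
/*
 * Typical use: compare an in-memory byte range against data still inside
 * the buffer without copying it out first, e.g. matching a directory
 * entry name (sketch; "name_ptr" is the byte offset of the stored name
 * within the leaf and "dir_item" is a hypothetical result):
 *
 *	if (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)
 *		return dir_item;
 */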
5019
5020 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5021                          unsigned long start, unsigned long len)
5022 {
5023         size_t cur;
5024         size_t offset;
5025         struct page *page;
5026         char *kaddr;
5027         char *src = (char *)srcv;
5028         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5029         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5030
5031         WARN_ON(start > eb->len);
5032         WARN_ON(start + len > eb->start + eb->len);
5033
5034         offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5035
5036         while (len > 0) {
5037                 page = extent_buffer_page(eb, i);
5038                 WARN_ON(!PageUptodate(page));
5039
5040                 cur = min(len, PAGE_CACHE_SIZE - offset);
5041                 kaddr = page_address(page);
5042                 memcpy(kaddr + offset, src, cur);
5043
5044                 src += cur;
5045                 len -= cur;
5046                 offset = 0;
5047                 i++;
5048         }
5049 }
5050
5051 void memset_extent_buffer(struct extent_buffer *eb, char c,
5052                           unsigned long start, unsigned long len)
5053 {
5054         size_t cur;
5055         size_t offset;
5056         struct page *page;
5057         char *kaddr;
5058         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5059         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5060
5061         WARN_ON(start > eb->len);
5062         WARN_ON(start + len > eb->start + eb->len);
5063
5064         offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5065
5066         while (len > 0) {
5067                 page = extent_buffer_page(eb, i);
5068                 WARN_ON(!PageUptodate(page));
5069
5070                 cur = min(len, PAGE_CACHE_SIZE - offset);
5071                 kaddr = page_address(page);
5072                 memset(kaddr + offset, c, cur);
5073
5074                 len -= cur;
5075                 offset = 0;
5076                 i++;
5077         }
5078 }
5079
5080 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5081                         unsigned long dst_offset, unsigned long src_offset,
5082                         unsigned long len)
5083 {
5084         u64 dst_len = dst->len;
5085         size_t cur;
5086         size_t offset;
5087         struct page *page;
5088         char *kaddr;
5089         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5090         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5091
5092         WARN_ON(src->len != dst_len);
5093
5094         offset = (start_offset + dst_offset) &
5095                 (PAGE_CACHE_SIZE - 1);
5096
5097         while (len > 0) {
5098                 page = extent_buffer_page(dst, i);
5099                 WARN_ON(!PageUptodate(page));
5100
5101                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
5102
5103                 kaddr = page_address(page);
5104                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
5105
5106                 src_offset += cur;
5107                 len -= cur;
5108                 offset = 0;
5109                 i++;
5110         }
5111 }
5112
5113 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5114 {
5115         unsigned long distance = (src > dst) ? src - dst : dst - src;
5116         return distance < len;
5117 }
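
/*
 * Worked example: with src == 100, dst == 150 and len == 60 the distance
 * is 50, which is less than len, so bytes [100, 159] and [150, 209]
 * overlap and copy_pages() below must use memmove().  With len == 50 the
 * ranges are disjoint, the distance is not less than len, and memcpy()
 * is safe.
 */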
5118
5119 static void copy_pages(struct page *dst_page, struct page *src_page,
5120                        unsigned long dst_off, unsigned long src_off,
5121                        unsigned long len)
5122 {
5123         char *dst_kaddr = page_address(dst_page);
5124         char *src_kaddr;
5125         int must_memmove = 0;
5126
5127         if (dst_page != src_page) {
5128                 src_kaddr = page_address(src_page);
5129         } else {
5130                 src_kaddr = dst_kaddr;
5131                 if (areas_overlap(src_off, dst_off, len))
5132                         must_memmove = 1;
5133         }
5134
5135         if (must_memmove)
5136                 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5137         else
5138                 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
5139 }
5140
5141 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5142                            unsigned long src_offset, unsigned long len)
5143 {
5144         size_t cur;
5145         size_t dst_off_in_page;
5146         size_t src_off_in_page;
5147         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5148         unsigned long dst_i;
5149         unsigned long src_i;
5150
5151         if (src_offset + len > dst->len) {
5152                 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
5153                        "len %lu dst len %lu\n", src_offset, len, dst->len);
5154                 BUG_ON(1);
5155         }
5156         if (dst_offset + len > dst->len) {
5157                 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
5158                        "len %lu dst len %lu\n", dst_offset, len, dst->len);
5159                 BUG_ON(1);
5160         }
5161
5162         while (len > 0) {
5163                 dst_off_in_page = (start_offset + dst_offset) &
5164                         (PAGE_CACHE_SIZE - 1);
5165                 src_off_in_page = (start_offset + src_offset) &
5166                         (PAGE_CACHE_SIZE - 1);
5167
5168                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5169                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
5170
5171                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
5172                                                src_off_in_page));
5173                 cur = min_t(unsigned long, cur,
5174                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
5175
5176                 copy_pages(extent_buffer_page(dst, dst_i),
5177                            extent_buffer_page(dst, src_i),
5178                            dst_off_in_page, src_off_in_page, cur);
5179
5180                 src_offset += cur;
5181                 dst_offset += cur;
5182                 len -= cur;
5183         }
5184 }
5185
5186 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5187                            unsigned long src_offset, unsigned long len)
5188 {
5189         size_t cur;
5190         size_t dst_off_in_page;
5191         size_t src_off_in_page;
5192         unsigned long dst_end = dst_offset + len - 1;
5193         unsigned long src_end = src_offset + len - 1;
5194         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5195         unsigned long dst_i;
5196         unsigned long src_i;
5197
5198         if (src_offset + len > dst->len) {
5199                 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
5200                        "len %lu dst len %lu\n", src_offset, len, dst->len);
5201                 BUG_ON(1);
5202         }
5203         if (dst_offset + len > dst->len) {
5204                 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
5205                        "len %lu dst len %lu\n", dst_offset, len, dst->len);
5206                 BUG_ON(1);
5207         }
5208         if (dst_offset < src_offset) {
5209                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5210                 return;
5211         }
5212         while (len > 0) {
5213                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
5214                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
5215
5216                 dst_off_in_page = (start_offset + dst_end) &
5217                         (PAGE_CACHE_SIZE - 1);
5218                 src_off_in_page = (start_offset + src_end) &
5219                         (PAGE_CACHE_SIZE - 1);
5220
5221                 cur = min_t(unsigned long, len, src_off_in_page + 1);
5222                 cur = min(cur, dst_off_in_page + 1);
5223                 copy_pages(extent_buffer_page(dst, dst_i),
5224                            extent_buffer_page(dst, src_i),
5225                            dst_off_in_page - cur + 1,
5226                            src_off_in_page - cur + 1, cur);
5227
5228                 dst_end -= cur;
5229                 src_end -= cur;
5230                 len -= cur;
5231         }
5232 }
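
/*
 * Worked example of the direction handling above: moving 8 bytes from
 * offset 0 to offset 4 within the same buffer overlaps, and a forward
 * copy would clobber bytes 4..7 before reading them, so the loop copies
 * from the tail (dst_end/src_end) backwards.  When dst_offset is lower
 * than src_offset the forward copy in memcpy_extent_buffer() is safe and
 * is used instead.
 */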
5233
5234 int try_release_extent_buffer(struct page *page)
5235 {
5236         struct extent_buffer *eb;
5237
5238         /*
5239          * We need to make sure nobody is attaching this page to an eb right
5240          * now.
5241          */
5242         spin_lock(&page->mapping->private_lock);
5243         if (!PagePrivate(page)) {
5244                 spin_unlock(&page->mapping->private_lock);
5245                 return 1;
5246         }
5247
5248         eb = (struct extent_buffer *)page->private;
5249         BUG_ON(!eb);
5250
5251         /*
5252          * This is a little awful but should be OK; we need to make sure that
5253          * the eb doesn't disappear out from under us while we're looking at
5254          * this page.
5255          */
5256         spin_lock(&eb->refs_lock);
5257         if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5258                 spin_unlock(&eb->refs_lock);
5259                 spin_unlock(&page->mapping->private_lock);
5260                 return 0;
5261         }
5262         spin_unlock(&page->mapping->private_lock);
5263
5264         /*
5265          * If tree ref isn't set then we know the ref on this eb is a real ref,
5266          * so just return; this page will likely be freed soon anyway.
5267          */
5268         if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5269                 spin_unlock(&eb->refs_lock);
5270                 return 0;
5271         }
5272
5273         return release_extent_buffer(eb);
5274 }