/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
#include "volumes.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * When auto defrag is enabled, we queue up these defrag structs to
 * remember which inodes need defragging passes.
 */
struct inode_defrag {
        struct rb_node rb_node;
        /* objectid */
        u64 ino;
        /*
         * transid where the defrag was added, we search for
         * extents newer than this
         */
        u64 transid;

        /* root objectid */
        u64 root;

        /* last offset we were able to defrag */
        u64 last_offset;

        /* if we've wrapped around back to zero once already */
        int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
                                  struct inode_defrag *defrag2)
{
        if (defrag1->root > defrag2->root)
                return 1;
        else if (defrag1->root < defrag2->root)
                return -1;
        else if (defrag1->ino > defrag2->ino)
                return 1;
        else if (defrag1->ino < defrag2->ino)
                return -1;
        else
                return 0;
}
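
/*
 * For example (illustrative values): a record for (root 5, ino 300)
 * sorts before (root 5, ino 400), which in turn sorts before
 * (root 7, ino 100): records are ordered by root objectid first and
 * inode number second, and this is the key order of the defrag rb-tree.
 */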

/* insert a record for an inode into the defrag tree.  The lock
 * must already be held.
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered.
 *
 * If an existing record is found, the defrag item you
 * pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct inode *inode,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        int ret;

        p = &root->fs_info->defrag_inodes.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(defrag, entry);
                if (ret < 0)
                        p = &parent->rb_left;
                else if (ret > 0)
                        p = &parent->rb_right;
                else {
                        /* if we're reinserting an entry for
                         * an old defrag run, make sure to
                         * lower the transid of our existing record
                         */
                        if (defrag->transid < entry->transid)
                                entry->transid = defrag->transid;
                        if (defrag->last_offset > entry->last_offset)
                                entry->last_offset = defrag->last_offset;
                        return -EEXIST;
                }
        }
        set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
        return 0;
}

static inline int __need_auto_defrag(struct btrfs_root *root)
{
        if (!btrfs_test_opt(root, AUTO_DEFRAG))
                return 0;

        if (btrfs_fs_closing(root->fs_info))
                return 0;

        return 1;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
                           struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *defrag;
        u64 transid;
        int ret;

        if (!__need_auto_defrag(root))
                return 0;

        if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                return 0;

        if (trans)
                transid = trans->transid;
        else
                transid = BTRFS_I(inode)->root->last_trans;

        defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
        if (!defrag)
                return -ENOMEM;

        defrag->ino = btrfs_ino(inode);
        defrag->transid = transid;
        defrag->root = root->root_key.objectid;

        spin_lock(&root->fs_info->defrag_inodes_lock);
        if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
                /*
                 * If we set the IN_DEFRAG flag and then evict the inode
                 * from memory, re-reading it gives a new in-memory inode
                 * without the IN_DEFRAG flag. In that case we may find an
                 * existing defrag record in the tree.
                 */
                ret = __btrfs_add_inode_defrag(inode, defrag);
                if (ret)
                        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        } else {
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        }
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        return 0;
}
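
/*
 * Illustrative caller sketch (not code from this file; the real call
 * sites live elsewhere in btrfs, in the write-out paths):
 *
 *        if (btrfs_test_opt(root, AUTO_DEFRAG) && looks_fragmented)
 *                btrfs_add_inode_defrag(trans, inode);
 *
 * where 'looks_fragmented' stands in for whatever heuristic the caller
 * uses (e.g. small extents being written) to decide defrag would help.
 */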

/*
 * Requeue the defrag object. If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct inode *inode,
                                       struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret;

        if (!__need_auto_defrag(root))
                goto out;

        /*
         * Here we don't check the IN_DEFRAG flag, because we need to
         * merge the records together.
         */
        spin_lock(&root->fs_info->defrag_inodes_lock);
        ret = __btrfs_add_inode_defrag(inode, defrag);
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        if (ret)
                goto out;
        return;
out:
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
 * Pick the defrag record for the inode we want; if it doesn't exist,
 * return the next record in key order.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
        struct inode_defrag *entry = NULL;
        struct inode_defrag tmp;
        struct rb_node *p;
        struct rb_node *parent = NULL;
        int ret;

        tmp.ino = ino;
        tmp.root = root;

        spin_lock(&fs_info->defrag_inodes_lock);
        p = fs_info->defrag_inodes.rb_node;
        while (p) {
                parent = p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(&tmp, entry);
                if (ret < 0)
                        p = parent->rb_left;
                else if (ret > 0)
                        p = parent->rb_right;
                else
                        goto out;
        }

        if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
                parent = rb_next(parent);
                if (parent)
                        entry = rb_entry(parent, struct inode_defrag, rb_node);
                else
                        entry = NULL;
        }
out:
        if (entry)
                rb_erase(parent, &fs_info->defrag_inodes);
        spin_unlock(&fs_info->defrag_inodes_lock);
        return entry;
}
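
/*
 * For example: with records for (root 5, ino 10) and (root 5, ino 20)
 * in the tree, asking for (root 5, ino 11) returns and removes the
 * (root 5, ino 20) record. Falling through to the next record in key
 * order is what lets btrfs_run_defrag_inodes() walk the whole tree
 * with a simple (root, ino + 1) cursor.
 */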

void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        struct rb_node *node;

        spin_lock(&fs_info->defrag_inodes_lock);
        node = rb_first(&fs_info->defrag_inodes);
        while (node) {
                rb_erase(node, &fs_info->defrag_inodes);
                defrag = rb_entry(node, struct inode_defrag, rb_node);
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

                if (need_resched()) {
                        spin_unlock(&fs_info->defrag_inodes_lock);
                        cond_resched();
                        spin_lock(&fs_info->defrag_inodes_lock);
                }

                node = rb_first(&fs_info->defrag_inodes);
        }
        spin_unlock(&fs_info->defrag_inodes_lock);
}

#define BTRFS_DEFRAG_BATCH      1024

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *inode_root;
        struct inode *inode;
        struct btrfs_key key;
        struct btrfs_ioctl_defrag_range_args range;
        int num_defrag;
        int index;
        int ret;

        /* get the inode */
        key.objectid = defrag->root;
        btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
        key.offset = (u64)-1;

        index = srcu_read_lock(&fs_info->subvol_srcu);

        inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(inode_root)) {
                ret = PTR_ERR(inode_root);
                goto cleanup;
        }

        key.objectid = defrag->ino;
        btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
                goto cleanup;
        }
        srcu_read_unlock(&fs_info->subvol_srcu, index);

        /* do a chunk of defrag */
        clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        memset(&range, 0, sizeof(range));
        range.len = (u64)-1;
        range.start = defrag->last_offset;

        sb_start_write(fs_info->sb);
        num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                       BTRFS_DEFRAG_BATCH);
        sb_end_write(fs_info->sb);
        /*
         * if we filled the whole defrag batch, there
         * must be more work to do.  Queue this defrag
         * again
         */
        if (num_defrag == BTRFS_DEFRAG_BATCH) {
                defrag->last_offset = range.start;
                btrfs_requeue_inode_defrag(inode, defrag);
        } else if (defrag->last_offset && !defrag->cycled) {
                /*
                 * we didn't fill our defrag batch, but
                 * we didn't start at zero.  Make sure we loop
                 * around to the start of the file.
                 */
                defrag->last_offset = 0;
                defrag->cycled = 1;
                btrfs_requeue_inode_defrag(inode, defrag);
        } else {
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        }

        iput(inode);
        return 0;
cleanup:
        srcu_read_unlock(&fs_info->subvol_srcu, index);
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        return ret;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        u64 first_ino = 0;
        u64 root_objectid = 0;

        atomic_inc(&fs_info->defrag_running);
        while (1) {
                /* Pause the auto defragger. */
                if (test_bit(BTRFS_FS_STATE_REMOUNTING,
                             &fs_info->fs_state))
                        break;

                if (!__need_auto_defrag(fs_info->tree_root))
                        break;

                /* find an inode to defrag */
                defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
                                                 first_ino);
                if (!defrag) {
                        if (root_objectid || first_ino) {
                                root_objectid = 0;
                                first_ino = 0;
                                continue;
                        } else {
                                break;
                        }
                }

                first_ino = defrag->ino + 1;
                root_objectid = defrag->root;

                __btrfs_run_defrag_inode(fs_info, defrag);
        }
        atomic_dec(&fs_info->defrag_running);

        /*
         * during unmount, we use the transaction_wait queue to
         * wait for the defragger to stop
         */
        wake_up(&fs_info->transaction_wait);
        return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                                         size_t write_bytes,
                                         struct page **prepared_pages,
                                         struct iov_iter *i)
{
        size_t copied = 0;
        size_t total_copied = 0;
        int pg = 0;
        int offset = pos & (PAGE_CACHE_SIZE - 1);

        while (write_bytes > 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                 *
                 * Disable pagefault to avoid recursive lock since
                 * the pages are already locked
                 */
                pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
                pagefault_enable();

                /* Flush processor's dcache for this page */
                flush_dcache_page(page);

                /*
                 * if we get a partial write, we can end up with
                 * partially up to date pages.  These add
                 * a lot of complexity, so make sure they don't
                 * happen by forcing this copy to be retried.
                 *
                 * The rest of the btrfs_file_write code will fall
                 * back to page at a time copies after we return 0.
                 */
                if (!PageUptodate(page) && copied < count)
                        copied = 0;

                iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;

                /* Return to btrfs_file_aio_write to fault page */
                if (unlikely(copied == 0))
                        break;

                if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
                        offset += copied;
                } else {
                        pg++;
                        offset = 0;
                }
        }
        return total_copied;
}
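
/*
 * For example (assuming 4k pages): a 10k write at offset 0 copies 4k,
 * 4k and then 2k, returning 10k. If the user buffer faults part-way
 * through a page that is not up to date, the partial copy is discarded
 * (copied is forced to 0) and we return what was copied so far; the
 * caller faults the pages back in and retries the remainder.
 */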

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                /*
                 * PageChecked is some magic for finding pages that were
                 * modified without going through btrfs_set_page_dirty;
                 * clear it here.
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                mark_page_accessed(pages[i]);
                page_cache_release(pages[i]);
        }
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
                             struct page **pages, size_t num_pages,
                             loff_t pos, size_t write_bytes,
                             struct extent_state **cached)
{
        int err = 0;
        int i;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

        start_pos = pos & ~((u64)root->sectorsize - 1);
        num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);

        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        cached);
        if (err)
                return err;

        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }

        /*
         * we've only changed i_size in ram, and we haven't updated
         * the disk i_size.  There is no need to log the inode
         * at this time.
         */
        if (end_pos > isize)
                i_size_write(inode, end_pos);
        return 0;
}
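
/*
 * Worked example (assuming a 4k sectorsize): pos = 5000 and
 * write_bytes = 100 give start_pos = 4096 and num_bytes = 4096, so the
 * whole sector containing the write is marked delalloc even though
 * only 100 bytes actually changed.
 */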

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                             int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 len = end - start + 1;
        u64 gen;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;
        bool modified;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
        while (1) {
                int no_splits = 0;

                modified = false;
                if (!split)
                        split = alloc_extent_map();
                if (!split2)
                        split2 = alloc_extent_map();
                if (!split || !split2)
                        no_splits = 1;

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                gen = em->generation;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                clear_bit(EXTENT_FLAG_LOGGING, &flags);
                modified = !list_empty(&em->list);
                remove_extent_mapping(em_tree, em);
                if (no_splits)
                        goto next;

                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;
                        split->orig_start = em->orig_start;
                        split->block_start = em->block_start;

                        if (compressed)
                                split->block_len = em->block_len;
                        else
                                split->block_len = split->len;
                        split->ram_bytes = em->ram_bytes;
                        split->orig_block_len = max(split->block_len,
                                                    em->orig_block_len);
                        split->generation = gen;
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        ret = add_extent_mapping(em_tree, split, modified);
                        BUG_ON(ret); /* Logic error */
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        split->generation = gen;
                        split->orig_block_len = max(em->block_len,
                                                    em->orig_block_len);
                        split->ram_bytes = em->ram_bytes;

                        if (compressed) {
                                split->block_len = em->block_len;
                                split->block_start = em->block_start;
                                split->orig_start = em->orig_start;
                        } else {
                                split->block_len = split->len;
                                split->block_start = em->block_start + diff;
                                split->orig_start = em->orig_start;
                        }

                        ret = add_extent_mapping(em_tree, split, modified);
                        BUG_ON(ret); /* Logic error */
                        free_extent_map(split);
                        split = NULL;
                }
next:
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
}
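
/*
 * For example: dropping [4k, 8k) from a cached mapping covering
 * [0k, 12k) removes the original extent_map and inserts two splits,
 * one for [0k, 4k) and one for [8k, 12k). For compressed extents the
 * splits keep the full on-disk block range, since the compressed data
 * is only usable as a whole.
 */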

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  If drop_end is passed in, it is set to
 * the offset up to which extents were actually dropped.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root, struct inode *inode,
                         struct btrfs_path *path, u64 start, u64 end,
                         u64 *drop_end, int drop_cache)
{
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 ino = btrfs_ino(inode);
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;
        int modify_tree = -1;
        int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
        int found = 0;

        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

        if (start >= BTRFS_I(inode)->disk_i_size)
                modify_tree = 0;

        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               search_start, modify_tree);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid > ino ||
                    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_inline_len(leaf, fi);
                } else {
                        WARN_ON(1);
                        extent_end = search_start;
                }

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                found = 1;
                search_start = max(key.offset, start);
                if (recow || !modify_tree) {
                        modify_tree = -1;
                        btrfs_release_path(path);
                        continue;
                }

                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                start - extent_offset, 0);
                                BUG_ON(ret); /* -ENOMEM */
                        }
                        key.offset = start;
                }
                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (start <= key.offset && end < extent_end) {
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
                        btrfs_set_item_key_safe(root, path, &new_key);

                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, end - key.offset);
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, extent_end - start);
                        if (end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (start <= key.offset && end >= extent_end) {
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (update_refs &&
                            extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                extent_end = ALIGN(extent_end,
                                                   root->sectorsize);
                        } else if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_free_extent(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
                                                extent_offset, 0);
                                BUG_ON(ret); /* -ENOMEM */
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                        }

                        if (end == extent_end)
                                break;

                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        if (ret) {
                                btrfs_abort_transaction(trans, root, ret);
                                break;
                        }

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(path);
                        continue;
                }

                BUG_ON(1);
        }

        if (!ret && del_nr > 0) {
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret)
                        btrfs_abort_transaction(trans, root, ret);
        }

        if (drop_end)
                *drop_end = found ? min(end, extent_end) : end;
        btrfs_release_path(path);
        return ret;
}

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode, u64 start,
                       u64 end, int drop_cache)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
                                   drop_cache);
        btrfs_free_path(path);
        return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}
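
/*
 * Usage note: *start and *end are in/out parameters. A value of 0 means
 * "accept whatever the neighbour covers"; a non-zero value insists the
 * neighbouring extent begins or ends exactly there, as the callers in
 * btrfs_mark_extent_written() below rely on.
 */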

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
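 *
 * For example (illustrative offsets): marking [4k, 8k) written inside
 * a preallocated extent covering [0k, 12k) leaves three file extents,
 * prealloc [0k, 4k), regular [4k, 8k) and prealloc [8k, 12k), all
 * pointing into the same on-disk extent.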
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        recow = 0;
        split = start;
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
               BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(root, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(root, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(path);
                        goto again;
                }
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
                                           ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */

                if (split == start) {
                        key.offset = start;
                } else {
                        BUG_ON(start != key.offset);
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }
        }
out:
        btrfs_free_path(path);
        return 0;
}

/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
                                 bool force_uptodate)
{
        int ret = 0;

        if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
            !PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                if (ret)
                        return ret;
                lock_page(page);
                if (!PageUptodate(page)) {
                        unlock_page(page);
                        return -EIO;
                }
        }
        return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
                         struct page **pages, size_t num_pages,
                         loff_t pos, unsigned long first_index,
                         size_t write_bytes, bool force_uptodate)
{
        struct extent_state *cached_state = NULL;
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = file_inode(file);
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int err = 0;
        int faili = 0;
        u64 start_pos;
        u64 last_pos;

        start_pos = pos & ~((u64)root->sectorsize - 1);
        last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = find_or_create_page(inode->i_mapping, index + i,
                                               mask | __GFP_WRITE);
                if (!pages[i]) {
                        faili = i - 1;
                        err = -ENOMEM;
                        goto fail;
                }

                if (i == 0)
                        err = prepare_uptodate_page(pages[i], pos,
                                                    force_uptodate);
                if (i == num_pages - 1)
                        err = prepare_uptodate_page(pages[i],
                                                    pos + write_bytes, false);
                if (err) {
                        page_cache_release(pages[i]);
                        faili = i - 1;
                        goto fail;
                }
                wait_on_page_writeback(pages[i]);
        }
        err = 0;
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
                lock_extent_bits(&BTRFS_I(inode)->io_tree,
                                 start_pos, last_pos - 1, 0, &cached_state);
                ordered = btrfs_lookup_first_ordered_extent(inode,
                                                            last_pos - 1);
                if (ordered &&
                    ordered->file_offset + ordered->len > start_pos &&
                    ordered->file_offset < last_pos) {
                        btrfs_put_ordered_extent(ordered);
                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                             start_pos, last_pos - 1,
                                             &cached_state, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
                                page_cache_release(pages[i]);
                        }
                        btrfs_wait_ordered_range(inode, start_pos,
                                                 last_pos - start_pos);
                        goto again;
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);

                clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
                                  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
                                  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                                  0, 0, &cached_state, GFP_NOFS);
                unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                     start_pos, last_pos - 1, &cached_state,
                                     GFP_NOFS);
        }
        for (i = 0; i < num_pages; i++) {
                if (clear_page_dirty_for_io(pages[i]))
                        account_page_redirty(pages[i]);
                set_page_extent_mapped(pages[i]);
                WARN_ON(!PageLocked(pages[i]));
        }
        return 0;
fail:
        while (faili >= 0) {
                unlock_page(pages[faili]);
                page_cache_release(pages[faili]);
                faili--;
        }
        return err;
}
1314
1315 static noinline int check_can_nocow(struct inode *inode, loff_t pos,
1316                                     size_t *write_bytes)
1317 {
1318         struct btrfs_trans_handle *trans;
1319         struct btrfs_root *root = BTRFS_I(inode)->root;
1320         struct btrfs_ordered_extent *ordered;
1321         u64 lockstart, lockend;
1322         u64 num_bytes;
1323         int ret;
1324
1325         lockstart = round_down(pos, root->sectorsize);
1326         lockend = lockstart + round_up(*write_bytes, root->sectorsize) - 1;
1327
1328         while (1) {
1329                 lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1330                 ordered = btrfs_lookup_ordered_range(inode, lockstart,
1331                                                      lockend - lockstart + 1);
1332                 if (!ordered) {
1333                         break;
1334                 }
1335                 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1336                 btrfs_start_ordered_extent(inode, ordered, 1);
1337                 btrfs_put_ordered_extent(ordered);
1338         }
1339
1340         trans = btrfs_join_transaction(root);
1341         if (IS_ERR(trans)) {
1342                 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1343                 return PTR_ERR(trans);
1344         }
1345
1346         num_bytes = lockend - lockstart + 1;
1347         ret = can_nocow_extent(trans, inode, lockstart, &num_bytes, NULL, NULL,
1348                                NULL);
1349         btrfs_end_transaction(trans, root);
1350         if (ret <= 0) {
1351                 ret = 0;
1352         } else {
1353                 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1354                                  EXTENT_DIRTY | EXTENT_DELALLOC |
1355                                  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
1356                                  NULL, GFP_NOFS);
1357                 *write_bytes = min_t(size_t, *write_bytes, num_bytes);
1358         }
1359
1360         unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1361
1362         return ret;
1363 }
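/*
 * Worked example of the rounding above (illustrative only, assuming a
 * 4096-byte sectorsize): for pos = 5000 and *write_bytes = 3000,
 *
 *      lockstart = round_down(5000, 4096)          = 4096
 *      lockend   = 4096 + round_up(3000, 4096) - 1 = 8191
 *
 * so exactly one sector-aligned block, num_bytes = 4096, is locked and
 * handed to can_nocow_extent(); if the nocow extent found there turns out
 * to be smaller than the request, *write_bytes is clamped down to it.
 */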
1364
1365 static noinline ssize_t __btrfs_buffered_write(struct file *file,
1366                                                struct iov_iter *i,
1367                                                loff_t pos)
1368 {
1369         struct inode *inode = file_inode(file);
1370         struct btrfs_root *root = BTRFS_I(inode)->root;
1371         struct page **pages = NULL;
1372         u64 release_bytes = 0;
1373         unsigned long first_index;
1374         size_t num_written = 0;
1375         int nrptrs;
1376         int ret = 0;
1377         bool only_release_metadata = false;
1378         bool force_page_uptodate = false;
1379
1380         nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
1381                      PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
1382                      (sizeof(struct page *)));
1383         nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1384         nrptrs = max(nrptrs, 8);
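        /*
         * Illustrative sizing note: with 4K pages and 8-byte pointers the
         * first clamp above caps the array at 512 page pointers (one page
         * worth), the second bounds it by the task's remaining dirty-page
         * budget, and the max() keeps at least 8 slots so a small budget
         * still makes progress.
         */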
1385         pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
1386         if (!pages)
1387                 return -ENOMEM;
1388
1389         first_index = pos >> PAGE_CACHE_SHIFT;
1390
1391         while (iov_iter_count(i) > 0) {
1392                 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
1393                 size_t write_bytes = min(iov_iter_count(i),
1394                                          nrptrs * (size_t)PAGE_CACHE_SIZE -
1395                                          offset);
1396                 size_t num_pages = (write_bytes + offset +
1397                                     PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1398                 size_t reserve_bytes;
1399                 size_t dirty_pages;
1400                 size_t copied;
1401
1402                 WARN_ON(num_pages > nrptrs);
1403
1404                 /*
1405                  * Fault in the user pages before locking them in
1406                  * prepare_pages, to avoid deadlocking on a recursive fault
1407                  */
1408                 if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1409                         ret = -EFAULT;
1410                         break;
1411                 }
1412
1413                 reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
1414                 ret = btrfs_check_data_free_space(inode, reserve_bytes);
1415                 if (ret == -ENOSPC &&
1416                     (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1417                                               BTRFS_INODE_PREALLOC))) {
1418                         ret = check_can_nocow(inode, pos, &write_bytes);
1419                         if (ret > 0) {
1420                                 only_release_metadata = true;
1421                                 /*
1422                                  * our prealloc extent may be smaller than
1423                                  * write_bytes, so scale down.
1424                                  */
1425                                 num_pages = (write_bytes + offset +
1426                                              PAGE_CACHE_SIZE - 1) >>
1427                                         PAGE_CACHE_SHIFT;
1428                                 reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
1429                                 ret = 0;
1430                         } else {
1431                                 ret = -ENOSPC;
1432                         }
1433                 }
1434
1435                 if (ret)
1436                         break;
1437
1438                 ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
1439                 if (ret) {
1440                         if (!only_release_metadata)
1441                                 btrfs_free_reserved_data_space(inode,
1442                                                                reserve_bytes);
1443                         break;
1444                 }
1445
1446                 release_bytes = reserve_bytes;
1447
1448                 /*
1449                  * This is going to set up the pages array with the number of
1450                  * pages we want, so we don't really need to worry about the
1451                  * contents of pages from loop to loop
1452                  */
1453                 ret = prepare_pages(root, file, pages, num_pages,
1454                                     pos, first_index, write_bytes,
1455                                     force_page_uptodate);
1456                 if (ret)
1457                         break;
1458
1459                 copied = btrfs_copy_from_user(pos, num_pages,
1460                                            write_bytes, pages, i);
1461
1462                 /*
1463                  * if we have trouble faulting in the pages, fall
1464                  * back to one page at a time
1465                  */
1466                 if (copied < write_bytes)
1467                         nrptrs = 1;
1468
1469                 if (copied == 0) {
1470                         force_page_uptodate = true;
1471                         dirty_pages = 0;
1472                 } else {
1473                         force_page_uptodate = false;
1474                         dirty_pages = (copied + offset +
1475                                        PAGE_CACHE_SIZE - 1) >>
1476                                        PAGE_CACHE_SHIFT;
1477                 }
1478
1479                 /*
1480                  * If we had a short copy we need to release the excess delalloc
1481                  * bytes we reserved.  We need to increment outstanding_extents
1482                  * because btrfs_delalloc_release_space will decrement it, but
1483                  * we still have an outstanding extent for the chunk we actually
1484                  * managed to copy.
1485                  */
1486                 if (num_pages > dirty_pages) {
1487                         release_bytes = (num_pages - dirty_pages) <<
1488                                 PAGE_CACHE_SHIFT;
1489                         if (copied > 0) {
1490                                 spin_lock(&BTRFS_I(inode)->lock);
1491                                 BTRFS_I(inode)->outstanding_extents++;
1492                                 spin_unlock(&BTRFS_I(inode)->lock);
1493                         }
1494                         if (only_release_metadata)
1495                                 btrfs_delalloc_release_metadata(inode,
1496                                                                 release_bytes);
1497                         else
1498                                 btrfs_delalloc_release_space(inode,
1499                                                              release_bytes);
1500                 }
1501
1502                 release_bytes = dirty_pages << PAGE_CACHE_SHIFT;
1503                 if (copied > 0) {
1504                         ret = btrfs_dirty_pages(root, inode, pages,
1505                                                 dirty_pages, pos, copied,
1506                                                 NULL);
1507                         if (ret) {
1508                                 btrfs_drop_pages(pages, num_pages);
1509                                 break;
1510                         }
1511                 }
1512
1513                 release_bytes = 0;
1514                 btrfs_drop_pages(pages, num_pages);
1515
1516                 if (only_release_metadata && copied > 0) {
1517                         u64 lockstart = round_down(pos, root->sectorsize);
1518                         u64 lockend = lockstart +
1519                                 (dirty_pages << PAGE_CACHE_SHIFT) - 1;
1520
1521                         set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
1522                                        lockend, EXTENT_NORESERVE, NULL,
1523                                        NULL, GFP_NOFS);
1524                         only_release_metadata = false;
1525                 }
1526
1527                 cond_resched();
1528
1529                 balance_dirty_pages_ratelimited(inode->i_mapping);
1530                 if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
1531                         btrfs_btree_balance_dirty(root);
1532
1533                 pos += copied;
1534                 num_written += copied;
1535         }
1536
1537         kfree(pages);
1538
1539         if (release_bytes) {
1540                 if (only_release_metadata)
1541                         btrfs_delalloc_release_metadata(inode, release_bytes);
1542                 else
1543                         btrfs_delalloc_release_space(inode, release_bytes);
1544         }
1545
1546         return num_written ? num_written : ret;
1547 }
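/*
 * Worked accounting example for the copy loop above (illustrative numbers,
 * 4K pages): suppose 4 pages (16K) are reserved but the user copy faults
 * after 2 pages worth of data.  Then dirty_pages = 2, so
 * (4 - 2) << PAGE_CACHE_SHIFT = 8K of the reservation is released right
 * away, release_bytes is reset to the 8K actually dirtied, and that part
 * is dropped once btrfs_dirty_pages() succeeds.
 */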
1548
1549 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1550                                     const struct iovec *iov,
1551                                     unsigned long nr_segs, loff_t pos,
1552                                     loff_t *ppos, size_t count, size_t ocount)
1553 {
1554         struct file *file = iocb->ki_filp;
1555         struct iov_iter i;
1556         ssize_t written;
1557         ssize_t written_buffered;
1558         loff_t endbyte;
1559         int err;
1560
1561         written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
1562                                             count, ocount);
1563
1564         if (written < 0 || written == count)
1565                 return written;
1566
1567         pos += written;
1568         count -= written;
1569         iov_iter_init(&i, iov, nr_segs, count, written);
1570         written_buffered = __btrfs_buffered_write(file, &i, pos);
1571         if (written_buffered < 0) {
1572                 err = written_buffered;
1573                 goto out;
1574         }
1575         endbyte = pos + written_buffered - 1;
1576         err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
1577         if (err)
1578                 goto out;
1579         written += written_buffered;
1580         *ppos = pos + written_buffered;
1581         invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
1582                                  endbyte >> PAGE_CACHE_SHIFT);
1583 out:
1584         return written ? written : err;
1585 }
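/*
 * Illustrative userspace sketch (not part of this file; fd, buf and len are
 * placeholders, buf must be suitably aligned for O_DIRECT, and error
 * checking is omitted):
 *
 *      int fd = open("/mnt/btrfs/file", O_WRONLY | O_CREAT | O_DIRECT, 0644);
 *      ssize_t n = write(fd, buf, len);
 *
 * If the direct write above completes only partially, the remainder is
 * written through the page cache and flushed before returning, so in the
 * common case the caller sees either the full count or an error.
 */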
1586
1587 static void update_time_for_write(struct inode *inode)
1588 {
1589         struct timespec now;
1590
1591         if (IS_NOCMTIME(inode))
1592                 return;
1593
1594         now = current_fs_time(inode->i_sb);
1595         if (!timespec_equal(&inode->i_mtime, &now))
1596                 inode->i_mtime = now;
1597
1598         if (!timespec_equal(&inode->i_ctime, &now))
1599                 inode->i_ctime = now;
1600
1601         if (IS_I_VERSION(inode))
1602                 inode_inc_iversion(inode);
1603 }
1604
1605 static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1606                                     const struct iovec *iov,
1607                                     unsigned long nr_segs, loff_t pos)
1608 {
1609         struct file *file = iocb->ki_filp;
1610         struct inode *inode = file_inode(file);
1611         struct btrfs_root *root = BTRFS_I(inode)->root;
1612         loff_t *ppos = &iocb->ki_pos;
1613         u64 start_pos;
1614         ssize_t num_written = 0;
1615         ssize_t err = 0;
1616         size_t count, ocount;
1617         bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
1618
1619         sb_start_write(inode->i_sb);
1620
1621         mutex_lock(&inode->i_mutex);
1622
1623         err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
1624         if (err) {
1625                 mutex_unlock(&inode->i_mutex);
1626                 goto out;
1627         }
1628         count = ocount;
1629
1630         current->backing_dev_info = inode->i_mapping->backing_dev_info;
1631         err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1632         if (err) {
1633                 mutex_unlock(&inode->i_mutex);
1634                 goto out;
1635         }
1636
1637         if (count == 0) {
1638                 mutex_unlock(&inode->i_mutex);
1639                 goto out;
1640         }
1641
1642         err = file_remove_suid(file);
1643         if (err) {
1644                 mutex_unlock(&inode->i_mutex);
1645                 goto out;
1646         }
1647
1648         /*
1649          * If BTRFS flips readonly due to some impossible error
1650          * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1651          * although we have opened a file as writable, we have
1652          * to stop this write operation to ensure FS consistency.
1653          */
1654         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
1655                 mutex_unlock(&inode->i_mutex);
1656                 err = -EROFS;
1657                 goto out;
1658         }
1659
1660         /*
1661          * We reserve space for updating the inode when we reserve space for the
1662          * extent we are going to write, so we will enospc out there.  We don't
1663          * need to start yet another transaction to update the inode as we will
1664          * update the inode when we finish writing whatever data we write.
1665          */
1666         update_time_for_write(inode);
1667
1668         start_pos = round_down(pos, root->sectorsize);
1669         if (start_pos > i_size_read(inode)) {
1670                 err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
1671                 if (err) {
1672                         mutex_unlock(&inode->i_mutex);
1673                         goto out;
1674                 }
1675         }
1676
1677         if (sync)
1678                 atomic_inc(&BTRFS_I(inode)->sync_writers);
1679
1680         if (unlikely(file->f_flags & O_DIRECT)) {
1681                 num_written = __btrfs_direct_write(iocb, iov, nr_segs,
1682                                                    pos, ppos, count, ocount);
1683         } else {
1684                 struct iov_iter i;
1685
1686                 iov_iter_init(&i, iov, nr_segs, count, num_written);
1687
1688                 num_written = __btrfs_buffered_write(file, &i, pos);
1689                 if (num_written > 0)
1690                         *ppos = pos + num_written;
1691         }
1692
1693         mutex_unlock(&inode->i_mutex);
1694
1695         /*
1696          * we want to make sure fsync finds this change
1697          * but we haven't joined a transaction running right now.
1698          *
1699          * Later on, someone is sure to update the inode and get the
1700          * real transid recorded.
1701          *
1702          * We set last_trans now to the fs_info generation + 1,
1703          * this will either be one more than the running transaction
1704          * or the generation used for the next transaction if there isn't
1705          * one running right now.
1706          *
1707          * We also have to set last_sub_trans to the current log transid,
1708          * otherwise subsequent syncs to a file that's been synced in this
1709          * transaction will appear to have already occurred.
1710          */
1711         BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
1712         BTRFS_I(inode)->last_sub_trans = root->log_transid;
1713         if (num_written > 0 || num_written == -EIOCBQUEUED) {
1714                 err = generic_write_sync(file, pos, num_written);
1715                 if (err < 0 && num_written > 0)
1716                         num_written = err;
1717         }
1718
1719         if (sync)
1720                 atomic_dec(&BTRFS_I(inode)->sync_writers);
1721 out:
1722         sb_end_write(inode->i_sb);
1723         current->backing_dev_info = NULL;
1724         return num_written ? num_written : err;
1725 }
1726
1727 int btrfs_release_file(struct inode *inode, struct file *filp)
1728 {
1729         /*
1730          * ordered_data_close is set by setattr when we are about to truncate
1731          * a file from a non-zero size to a zero size.  This tries to
1732          * flush down new bytes that may have been written if the
1733          * application were using truncate to replace a file in place.
1734          */
1735         if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1736                                &BTRFS_I(inode)->runtime_flags)) {
1737                 struct btrfs_trans_handle *trans;
1738                 struct btrfs_root *root = BTRFS_I(inode)->root;
1739
1740                 /*
1741                  * We need to block on a committing transaction to keep us from
1742                  * throwing an ordered operation onto the list and causing
1743                  * something like sync to deadlock trying to flush out this
1744                  * inode.
1745                  */
1746                 trans = btrfs_start_transaction(root, 0);
1747                 if (IS_ERR(trans))
1748                         return PTR_ERR(trans);
1749                 btrfs_add_ordered_operation(trans, root, inode);
1750                 btrfs_end_transaction(trans, root);
1751                 if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
1752                         filemap_flush(inode->i_mapping);
1753         }
1754         if (filp->private_data)
1755                 btrfs_ioctl_trans_end(filp);
1756         return 0;
1757 }
1758
1759 /*
1760  * fsync call for both files and directories.  This logs the inode into
1761  * the tree log instead of forcing full commits whenever possible.
1762  *
1763  * It needs to call filemap_fdatawait so that all ordered extent updates
1764  * in the metadata btree are up to date for copying to the log.
1765  *
1766  * It drops the inode mutex before doing the tree log commit.  This is an
1767  * important optimization for directories because holding the mutex prevents
1768  * new operations on the dir while we write to disk.
1769  */
1770 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1771 {
1772         struct dentry *dentry = file->f_path.dentry;
1773         struct inode *inode = dentry->d_inode;
1774         struct btrfs_root *root = BTRFS_I(inode)->root;
1775         int ret = 0;
1776         struct btrfs_trans_handle *trans;
1777         bool full_sync = false;
1778
1779         trace_btrfs_sync_file(file, datasync);
1780
1781         /*
1782          * We write the dirty pages in the range and wait until they complete
1783          * outside of the ->i_mutex, so that multiple tasks can flush dirty
1784          * pages concurrently and improve performance.  See
1785          * btrfs_wait_ordered_range for an explanation of the ASYNC check.
1786          */
1787         atomic_inc(&BTRFS_I(inode)->sync_writers);
1788         ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
1789         if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1790                              &BTRFS_I(inode)->runtime_flags))
1791                 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
1792         atomic_dec(&BTRFS_I(inode)->sync_writers);
1793         if (ret)
1794                 return ret;
1795
1796         mutex_lock(&inode->i_mutex);
1797
1798         /*
1799          * We flush the dirty pages again to make sure no dirty pages in the
1800          * range are left behind.
1801          */
1802         atomic_inc(&root->log_batch);
1803         full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1804                              &BTRFS_I(inode)->runtime_flags);
1805         if (full_sync)
1806                 btrfs_wait_ordered_range(inode, start, end - start + 1);
1807         atomic_inc(&root->log_batch);
1808
1809         /*
1810          * check the transaction that last modified this inode
1811          * and see if its already been committed
1812          */
1813         if (!BTRFS_I(inode)->last_trans) {
1814                 mutex_unlock(&inode->i_mutex);
1815                 goto out;
1816         }
1817
1818         /*
1819          * if the last transaction that changed this file was before
1820          * the current transaction, we can bail out now without any
1821          * syncing
1822          */
1823         smp_mb();
1824         if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
1825             BTRFS_I(inode)->last_trans <=
1826             root->fs_info->last_trans_committed) {
1827                 BTRFS_I(inode)->last_trans = 0;
1828
1829                 /*
1830                  * We've had everything committed since the last time we were
1831                  * modified so clear this flag in case it was set for whatever
1832                  * reason, it's no longer relevant.
1833                  */
1834                 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1835                           &BTRFS_I(inode)->runtime_flags);
1836                 mutex_unlock(&inode->i_mutex);
1837                 goto out;
1838         }
1839
1840         /*
1841          * OK, we haven't committed the transaction yet, let's do a commit
1842          */
1843         if (file->private_data)
1844                 btrfs_ioctl_trans_end(file);
1845
1846         trans = btrfs_start_transaction(root, 0);
1847         if (IS_ERR(trans)) {
1848                 ret = PTR_ERR(trans);
1849                 mutex_unlock(&inode->i_mutex);
1850                 goto out;
1851         }
1852
1853         ret = btrfs_log_dentry_safe(trans, root, dentry);
1854         if (ret < 0) {
1855                 mutex_unlock(&inode->i_mutex);
1856                 goto out;
1857         }
1858
1859         /* we've logged all the items and now have a consistent
1860          * version of the file in the log.  It is possible that
1861          * someone will come in and modify the file, but that's
1862          * fine because the log is consistent on disk, and we
1863          * have references to all of the file's extents
1864          *
1865          * It is possible that someone will come in and log the
1866          * file again, but that will end up using the synchronization
1867          * inside btrfs_sync_log to keep things safe.
1868          */
1869         mutex_unlock(&inode->i_mutex);
1870
1871         if (ret != BTRFS_NO_LOG_SYNC) {
1872                 if (ret > 0) {
1873                         /*
1874                          * If we didn't already wait for ordered extents we need
1875                          * to do that now.
1876                          */
1877                         if (!full_sync)
1878                                 btrfs_wait_ordered_range(inode, start,
1879                                                          end - start + 1);
1880                         ret = btrfs_commit_transaction(trans, root);
1881                 } else {
1882                         ret = btrfs_sync_log(trans, root);
1883                         if (ret == 0) {
1884                                 ret = btrfs_end_transaction(trans, root);
1885                         } else {
1886                                 if (!full_sync)
1887                                         btrfs_wait_ordered_range(inode, start,
1888                                                                  end -
1889                                                                  start + 1);
1890                                 ret = btrfs_commit_transaction(trans, root);
1891                         }
1892                 }
1893         } else {
1894                 ret = btrfs_end_transaction(trans, root);
1895         }
1896 out:
1897         return ret > 0 ? -EIO : ret;
1898 }
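/*
 * Illustrative userspace sketch of the fast path above (path, data and len
 * are placeholders; error checking omitted):
 *
 *      int fd = open("/mnt/btrfs/file", O_WRONLY);
 *      write(fd, data, len);
 *      fsync(fd);      (first sync: copies the inode into the tree log)
 *      fsync(fd);      (no new changes: bails out on the last_trans check)
 */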
1899
1900 static const struct vm_operations_struct btrfs_file_vm_ops = {
1901         .fault          = filemap_fault,
1902         .page_mkwrite   = btrfs_page_mkwrite,
1903         .remap_pages    = generic_file_remap_pages,
1904 };
1905
1906 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
1907 {
1908         struct address_space *mapping = filp->f_mapping;
1909
1910         if (!mapping->a_ops->readpage)
1911                 return -ENOEXEC;
1912
1913         file_accessed(filp);
1914         vma->vm_ops = &btrfs_file_vm_ops;
1915
1916         return 0;
1917 }
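/*
 * Illustrative userspace sketch (fd is a placeholder): a shared writable
 * mapping takes a write fault on the first store to each page, which is
 * routed to btrfs_page_mkwrite() via the vm_ops installed above:
 *
 *      char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *      p[0] = 1;       (triggers .page_mkwrite for the first page)
 */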
1918
1919 static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
1920                           int slot, u64 start, u64 end)
1921 {
1922         struct btrfs_file_extent_item *fi;
1923         struct btrfs_key key;
1924
1925         if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1926                 return 0;
1927
1928         btrfs_item_key_to_cpu(leaf, &key, slot);
1929         if (key.objectid != btrfs_ino(inode) ||
1930             key.type != BTRFS_EXTENT_DATA_KEY)
1931                 return 0;
1932
1933         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1934
1935         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
1936                 return 0;
1937
1938         if (btrfs_file_extent_disk_bytenr(leaf, fi))
1939                 return 0;
1940
1941         if (key.offset == end)
1942                 return 1;
1943         if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
1944                 return 1;
1945         return 0;
1946 }
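/*
 * Illustrative example of the two mergeable cases above: given an existing
 * hole extent (disk_bytenr == 0) at key.offset == 0 with num_bytes == 4096,
 * a new hole covering [4096, 8192) merges with it because
 * key.offset + num_bytes == start; an existing hole at key.offset == 8192
 * merges with the same new hole because key.offset == end.
 */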
1947
1948 static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
1949                       struct btrfs_path *path, u64 offset, u64 end)
1950 {
1951         struct btrfs_root *root = BTRFS_I(inode)->root;
1952         struct extent_buffer *leaf;
1953         struct btrfs_file_extent_item *fi;
1954         struct extent_map *hole_em;
1955         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1956         struct btrfs_key key;
1957         int ret;
1958
1959         key.objectid = btrfs_ino(inode);
1960         key.type = BTRFS_EXTENT_DATA_KEY;
1961         key.offset = offset;
1962
1963
1964         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1965         if (ret < 0)
1966                 return ret;
1967         BUG_ON(!ret);
1968
1969         leaf = path->nodes[0];
1970         if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
1971                 u64 num_bytes;
1972
1973                 path->slots[0]--;
1974                 fi = btrfs_item_ptr(leaf, path->slots[0],
1975                                     struct btrfs_file_extent_item);
1976                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
1977                         end - offset;
1978                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1979                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1980                 btrfs_set_file_extent_offset(leaf, fi, 0);
1981                 btrfs_mark_buffer_dirty(leaf);
1982                 goto out;
1983         }
1984
1985         if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
1986                 u64 num_bytes;
1987
1988                 path->slots[0]++;
1989                 key.offset = offset;
1990                 btrfs_set_item_key_safe(root, path, &key);
1991                 fi = btrfs_item_ptr(leaf, path->slots[0],
1992                                     struct btrfs_file_extent_item);
1993                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
1994                         offset;
1995                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1996                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1997                 btrfs_set_file_extent_offset(leaf, fi, 0);
1998                 btrfs_mark_buffer_dirty(leaf);
1999                 goto out;
2000         }
2001         btrfs_release_path(path);
2002
2003         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
2004                                        0, 0, end - offset, 0, end - offset,
2005                                        0, 0, 0);
2006         if (ret)
2007                 return ret;
2008
2009 out:
2010         btrfs_release_path(path);
2011
2012         hole_em = alloc_extent_map();
2013         if (!hole_em) {
2014                 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2015                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2016                         &BTRFS_I(inode)->runtime_flags);
2017         } else {
2018                 hole_em->start = offset;
2019                 hole_em->len = end - offset;
2020                 hole_em->ram_bytes = hole_em->len;
2021                 hole_em->orig_start = offset;
2022
2023                 hole_em->block_start = EXTENT_MAP_HOLE;
2024                 hole_em->block_len = 0;
2025                 hole_em->orig_block_len = 0;
2026                 hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
2027                 hole_em->compress_type = BTRFS_COMPRESS_NONE;
2028                 hole_em->generation = trans->transid;
2029
2030                 do {
2031                         btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2032                         write_lock(&em_tree->lock);
2033                         ret = add_extent_mapping(em_tree, hole_em, 1);
2034                         write_unlock(&em_tree->lock);
2035                 } while (ret == -EEXIST);
2036                 free_extent_map(hole_em);
2037                 if (ret)
2038                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2039                                 &BTRFS_I(inode)->runtime_flags);
2040         }
2041
2042         return 0;
2043 }
2044
2045 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2046 {
2047         struct btrfs_root *root = BTRFS_I(inode)->root;
2048         struct extent_state *cached_state = NULL;
2049         struct btrfs_path *path;
2050         struct btrfs_block_rsv *rsv;
2051         struct btrfs_trans_handle *trans;
2052         u64 lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
2053         u64 lockend = round_down(offset + len,
2054                                  BTRFS_I(inode)->root->sectorsize) - 1;
2055         u64 cur_offset = lockstart;
2056         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
2057         u64 drop_end;
2058         int ret = 0;
2059         int err = 0;
2060         bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
2061                           ((offset + len - 1) >> PAGE_CACHE_SHIFT));
2062
2063         btrfs_wait_ordered_range(inode, offset, len);
2064
2065         mutex_lock(&inode->i_mutex);
2066         /*
2067          * We needn't truncate any page which is beyond the end of the file
2068          * because we are sure there is no data there.
2069          */
2070         /*
2071          * Only do this if the hole starts and ends within the same page
2072          * and doesn't cover the entire page.
2073          */
2074         if (same_page && len < PAGE_CACHE_SIZE) {
2075                 if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE))
2076                         ret = btrfs_truncate_page(inode, offset, len, 0);
2077                 mutex_unlock(&inode->i_mutex);
2078                 return ret;
2079         }
2080
2081         /* zero back part of the first page */
2082         if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
2083                 ret = btrfs_truncate_page(inode, offset, 0, 0);
2084                 if (ret) {
2085                         mutex_unlock(&inode->i_mutex);
2086                         return ret;
2087                 }
2088         }
2089
2090         /* zero the front end of the last page */
2091         if (offset + len < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
2092                 ret = btrfs_truncate_page(inode, offset + len, 0, 1);
2093                 if (ret) {
2094                         mutex_unlock(&inode->i_mutex);
2095                         return ret;
2096                 }
2097         }
2098
2099         if (lockend < lockstart) {
2100                 mutex_unlock(&inode->i_mutex);
2101                 return 0;
2102         }
2103
2104         while (1) {
2105                 struct btrfs_ordered_extent *ordered;
2106
2107                 truncate_pagecache_range(inode, lockstart, lockend);
2108
2109                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2110                                  0, &cached_state);
2111                 ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2112
2113                 /*
2114                  * We need to make sure we have no ordered extents in this range
2115                  * and nobody raced in and read a page in this range; if either
2116                  * happened we need to try again.
2117                  */
2118                 if ((!ordered ||
2119                     (ordered->file_offset + ordered->len < lockstart ||
2120                      ordered->file_offset > lockend)) &&
2121                      !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
2122                                      lockend, EXTENT_UPTODATE, 0,
2123                                      cached_state)) {
2124                         if (ordered)
2125                                 btrfs_put_ordered_extent(ordered);
2126                         break;
2127                 }
2128                 if (ordered)
2129                         btrfs_put_ordered_extent(ordered);
2130                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2131                                      lockend, &cached_state, GFP_NOFS);
2132                 btrfs_wait_ordered_range(inode, lockstart,
2133                                          lockend - lockstart + 1);
2134         }
2135
2136         path = btrfs_alloc_path();
2137         if (!path) {
2138                 ret = -ENOMEM;
2139                 goto out;
2140         }
2141
2142         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2143         if (!rsv) {
2144                 ret = -ENOMEM;
2145                 goto out_free;
2146         }
2147         rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
2148         rsv->failfast = 1;
2149
2150         /*
2151          * 1 - update the inode
2152          * 1 - removing the extents in the range
2153          * 1 - adding the hole extent
2154          */
2155         trans = btrfs_start_transaction(root, 3);
2156         if (IS_ERR(trans)) {
2157                 err = PTR_ERR(trans);
2158                 goto out_free;
2159         }
2160
2161         ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
2162                                       min_size);
2163         BUG_ON(ret);
2164         trans->block_rsv = rsv;
2165
2166         while (cur_offset < lockend) {
2167                 ret = __btrfs_drop_extents(trans, root, inode, path,
2168                                            cur_offset, lockend + 1,
2169                                            &drop_end, 1);
2170                 if (ret != -ENOSPC)
2171                         break;
2172
2173                 trans->block_rsv = &root->fs_info->trans_block_rsv;
2174
2175                 ret = fill_holes(trans, inode, path, cur_offset, drop_end);
2176                 if (ret) {
2177                         err = ret;
2178                         break;
2179                 }
2180
2181                 cur_offset = drop_end;
2182
2183                 ret = btrfs_update_inode(trans, root, inode);
2184                 if (ret) {
2185                         err = ret;
2186                         break;
2187                 }
2188
2189                 btrfs_end_transaction(trans, root);
2190                 btrfs_btree_balance_dirty(root);
2191
2192                 trans = btrfs_start_transaction(root, 3);
2193                 if (IS_ERR(trans)) {
2194                         ret = PTR_ERR(trans);
2195                         trans = NULL;
2196                         break;
2197                 }
2198
2199                 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
2200                                               rsv, min_size);
2201                 BUG_ON(ret);    /* shouldn't happen */
2202                 trans->block_rsv = rsv;
2203         }
2204
2205         if (ret) {
2206                 err = ret;
2207                 goto out_trans;
2208         }
2209
2210         trans->block_rsv = &root->fs_info->trans_block_rsv;
2211         ret = fill_holes(trans, inode, path, cur_offset, drop_end);
2212         if (ret) {
2213                 err = ret;
2214                 goto out_trans;
2215         }
2216
2217 out_trans:
2218         if (!trans)
2219                 goto out_free;
2220
2221         inode_inc_iversion(inode);
2222         inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2223
2224         trans->block_rsv = &root->fs_info->trans_block_rsv;
2225         ret = btrfs_update_inode(trans, root, inode);
2226         btrfs_end_transaction(trans, root);
2227         btrfs_btree_balance_dirty(root);
2228 out_free:
2229         btrfs_free_path(path);
2230         btrfs_free_block_rsv(root, rsv);
2231 out:
2232         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2233                              &cached_state, GFP_NOFS);
2234         mutex_unlock(&inode->i_mutex);
2235         if (ret && !err)
2236                 err = ret;
2237         return err;
2238 }
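/*
 * Illustrative userspace sketch (fd is a placeholder): this path is reached
 * through fallocate(2), and the VFS requires KEEP_SIZE alongside PUNCH_HOLE:
 *
 *      fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 4096, 8192);
 *
 * Afterwards bytes 4096..12287 read back as zeroes while i_size is
 * unchanged.
 */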
2239
2240 static long btrfs_fallocate(struct file *file, int mode,
2241                             loff_t offset, loff_t len)
2242 {
2243         struct inode *inode = file_inode(file);
2244         struct extent_state *cached_state = NULL;
2245         struct btrfs_root *root = BTRFS_I(inode)->root;
2246         u64 cur_offset;
2247         u64 last_byte;
2248         u64 alloc_start;
2249         u64 alloc_end;
2250         u64 alloc_hint = 0;
2251         u64 locked_end;
2252         struct extent_map *em;
2253         int blocksize = BTRFS_I(inode)->root->sectorsize;
2254         int ret;
2255
2256         alloc_start = round_down(offset, blocksize);
2257         alloc_end = round_up(offset + len, blocksize);
2258
2259         /* Make sure we aren't being given some crap mode */
2260         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2261                 return -EOPNOTSUPP;
2262
2263         if (mode & FALLOC_FL_PUNCH_HOLE)
2264                 return btrfs_punch_hole(inode, offset, len);
2265
2266         /*
2267          * Make sure we have enough space before we do the
2268          * allocation.
2269          */
2270         ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
2271         if (ret)
2272                 return ret;
2273         if (root->fs_info->quota_enabled) {
2274                 ret = btrfs_qgroup_reserve(root, alloc_end - alloc_start);
2275                 if (ret)
2276                         goto out_reserve_fail;
2277         }
2278
2279         mutex_lock(&inode->i_mutex);
2280         ret = inode_newsize_ok(inode, alloc_end);
2281         if (ret)
2282                 goto out;
2283
2284         if (alloc_start > inode->i_size) {
2285                 ret = btrfs_cont_expand(inode, i_size_read(inode),
2286                                         alloc_start);
2287                 if (ret)
2288                         goto out;
2289         } else {
2290                 /*
2291                  * If we are fallocating from the end of the file onward we
2292                  * need to zero out the end of the page if i_size lands in the
2293                  * middle of a page.
2294                  */
2295                 ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
2296                 if (ret)
2297                         goto out;
2298         }
2299
2300         /*
2301          * wait for ordered IO before we have any locks.  We'll loop again
2302          * below with the locks held.
2303          */
2304         btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
2305
2306         locked_end = alloc_end - 1;
2307         while (1) {
2308                 struct btrfs_ordered_extent *ordered;
2309
2310                 /* the extent lock is ordered inside the running
2311                  * transaction
2312                  */
2313                 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
2314                                  locked_end, 0, &cached_state);
2315                 ordered = btrfs_lookup_first_ordered_extent(inode,
2316                                                             alloc_end - 1);
2317                 if (ordered &&
2318                     ordered->file_offset + ordered->len > alloc_start &&
2319                     ordered->file_offset < alloc_end) {
2320                         btrfs_put_ordered_extent(ordered);
2321                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
2322                                              alloc_start, locked_end,
2323                                              &cached_state, GFP_NOFS);
2324                         /*
2325                          * we can't wait on the range with the transaction
2326                          * running or with the extent lock held
2327                          */
2328                         btrfs_wait_ordered_range(inode, alloc_start,
2329                                                  alloc_end - alloc_start);
2330                 } else {
2331                         if (ordered)
2332                                 btrfs_put_ordered_extent(ordered);
2333                         break;
2334                 }
2335         }
2336
2337         cur_offset = alloc_start;
2338         while (1) {
2339                 u64 actual_end;
2340
2341                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2342                                       alloc_end - cur_offset, 0);
2343                 if (IS_ERR_OR_NULL(em)) {
2344                         if (!em)
2345                                 ret = -ENOMEM;
2346                         else
2347                                 ret = PTR_ERR(em);
2348                         break;
2349                 }
2350                 last_byte = min(extent_map_end(em), alloc_end);
2351                 actual_end = min_t(u64, extent_map_end(em), offset + len);
2352                 last_byte = ALIGN(last_byte, blocksize);
2353
2354                 if (em->block_start == EXTENT_MAP_HOLE ||
2355                     (cur_offset >= inode->i_size &&
2356                      !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
2357                         ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
2358                                                         last_byte - cur_offset,
2359                                                         1 << inode->i_blkbits,
2360                                                         offset + len,
2361                                                         &alloc_hint);
2362
2363                         if (ret < 0) {
2364                                 free_extent_map(em);
2365                                 break;
2366                         }
2367                 } else if (actual_end > inode->i_size &&
2368                            !(mode & FALLOC_FL_KEEP_SIZE)) {
2369                         /*
2370                          * We didn't need to allocate any more space, but we
2371                          * still extended the size of the file so we need to
2372                          * update i_size.
2373                          */
2374                         inode->i_ctime = CURRENT_TIME;
2375                         i_size_write(inode, actual_end);
2376                         btrfs_ordered_update_i_size(inode, actual_end, NULL);
2377                 }
2378                 free_extent_map(em);
2379
2380                 cur_offset = last_byte;
2381                 if (cur_offset >= alloc_end) {
2382                         ret = 0;
2383                         break;
2384                 }
2385         }
2386         unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2387                              &cached_state, GFP_NOFS);
2388 out:
2389         mutex_unlock(&inode->i_mutex);
2390         if (root->fs_info->quota_enabled)
2391                 btrfs_qgroup_free(root, alloc_end - alloc_start);
2392 out_reserve_fail:
2393         /* Let go of our reservation. */
2394         btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
2395         return ret;
2396 }
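/*
 * Illustrative userspace sketch (fd is a placeholder) of the preallocation
 * path above:
 *
 *      fallocate(fd, 0, 0, 1 << 20);                   (extends i_size if needed)
 *      fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20); (leaves i_size alone)
 *
 * Both reserve extents up front, so later writes into the range don't need
 * new data allocations.
 */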
2397
2398 static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
2399 {
2400         struct btrfs_root *root = BTRFS_I(inode)->root;
2401         struct extent_map *em;
2402         struct extent_state *cached_state = NULL;
2403         u64 lockstart = *offset;
2404         u64 lockend = i_size_read(inode);
2405         u64 start = *offset;
2406         u64 orig_start = *offset;
2407         u64 len = i_size_read(inode);
2408         u64 last_end = 0;
2409         int ret = 0;
2410
2411         lockend = max_t(u64, root->sectorsize, lockend);
2412         if (lockend <= lockstart)
2413                 lockend = lockstart + root->sectorsize;
2414
2415         lockend--;
2416         len = lockend - lockstart + 1;
2417
2418         len = max_t(u64, len, root->sectorsize);
2419         if (inode->i_size == 0)
2420                 return -ENXIO;
2421
2422         lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
2423                          &cached_state);
2424
2425         /*
2426          * Delalloc is such a pain.  If we have a hole and we have pending
2427          * delalloc for a portion of the hole we will get back a hole that
2428          * exists for the entire range since it hasn't been actually written
2429          * yet.  So to take care of this case we need to look for an extent just
2430          * before the position we want in case there is outstanding delalloc
2431          * going on here.
2432          */
2433         if (whence == SEEK_HOLE && start != 0) {
2434                 if (start <= root->sectorsize)
2435                         em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
2436                                                      root->sectorsize, 0);
2437                 else
2438                         em = btrfs_get_extent_fiemap(inode, NULL, 0,
2439                                                      start - root->sectorsize,
2440                                                      root->sectorsize, 0);
2441                 if (IS_ERR(em)) {
2442                         ret = PTR_ERR(em);
2443                         goto out;
2444                 }
2445                 last_end = em->start + em->len;
2446                 if (em->block_start == EXTENT_MAP_DELALLOC)
2447                         last_end = min_t(u64, last_end, inode->i_size);
2448                 free_extent_map(em);
2449         }
2450
2451         while (1) {
2452                 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2453                 if (IS_ERR(em)) {
2454                         ret = PTR_ERR(em);
2455                         break;
2456                 }
2457
2458                 if (em->block_start == EXTENT_MAP_HOLE) {
2459                         if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2460                                 if (last_end <= orig_start) {
2461                                         free_extent_map(em);
2462                                         ret = -ENXIO;
2463                                         break;
2464                                 }
2465                         }
2466
2467                         if (whence == SEEK_HOLE) {
2468                                 *offset = start;
2469                                 free_extent_map(em);
2470                                 break;
2471                         }
2472                 } else {
2473                         if (whence == SEEK_DATA) {
2474                                 if (em->block_start == EXTENT_MAP_DELALLOC) {
2475                                         if (start >= inode->i_size) {
2476                                                 free_extent_map(em);
2477                                                 ret = -ENXIO;
2478                                                 break;
2479                                         }
2480                                 }
2481
2482                                 if (!test_bit(EXTENT_FLAG_PREALLOC,
2483                                               &em->flags)) {
2484                                         *offset = start;
2485                                         free_extent_map(em);
2486                                         break;
2487                                 }
2488                         }
2489                 }
2490
2491                 start = em->start + em->len;
2492                 last_end = em->start + em->len;
2493
2494                 if (em->block_start == EXTENT_MAP_DELALLOC)
2495                         last_end = min_t(u64, last_end, inode->i_size);
2496
2497                 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2498                         free_extent_map(em);
2499                         ret = -ENXIO;
2500                         break;
2501                 }
2502                 free_extent_map(em);
2503                 cond_resched();
2504         }
2505         if (!ret)
2506                 *offset = min(*offset, inode->i_size);
2507 out:
2508         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2509                              &cached_state, GFP_NOFS);
2510         return ret;
2511 }
2512
2513 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
2514 {
2515         struct inode *inode = file->f_mapping->host;
2516         int ret;
2517
2518         mutex_lock(&inode->i_mutex);
2519         switch (whence) {
2520         case SEEK_END:
2521         case SEEK_CUR:
2522                 offset = generic_file_llseek(file, offset, whence);
2523                 goto out;
2524         case SEEK_DATA:
2525         case SEEK_HOLE:
2526                 if (offset >= i_size_read(inode)) {
2527                         mutex_unlock(&inode->i_mutex);
2528                         return -ENXIO;
2529                 }
2530
2531                 ret = find_desired_extent(inode, &offset, whence);
2532                 if (ret) {
2533                         mutex_unlock(&inode->i_mutex);
2534                         return ret;
2535                 }
2536         }
2537
2538         if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
2539                 offset = -EINVAL;
2540                 goto out;
2541         }
2542         if (offset > inode->i_sb->s_maxbytes) {
2543                 offset = -EINVAL;
2544                 goto out;
2545         }
2546
2547         /* Special lock needed here? */
2548         if (offset != file->f_pos) {
2549                 file->f_pos = offset;
2550                 file->f_version = 0;
2551         }
2552 out:
2553         mutex_unlock(&inode->i_mutex);
2554         return offset;
2555 }
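/*
 * Illustrative userspace sketch of the SEEK_DATA/SEEK_HOLE support above
 * (fd is a placeholder; glibc needs _GNU_SOURCE for the SEEK_* constants):
 *
 *      off_t data = lseek(fd, 0, SEEK_DATA);
 *      off_t hole = lseek(fd, data, SEEK_HOLE);
 *
 * lseek() failing with ENXIO corresponds to the -ENXIO returns above, e.g.
 * SEEK_DATA with no data before EOF, or any offset at or beyond i_size.
 */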
2556
2557 const struct file_operations btrfs_file_operations = {
2558         .llseek         = btrfs_file_llseek,
2559         .read           = do_sync_read,
2560         .write          = do_sync_write,
2561         .aio_read       = generic_file_aio_read,
2562         .splice_read    = generic_file_splice_read,
2563         .aio_write      = btrfs_file_aio_write,
2564         .mmap           = btrfs_file_mmap,
2565         .open           = generic_file_open,
2566         .release        = btrfs_release_file,
2567         .fsync          = btrfs_sync_file,
2568         .fallocate      = btrfs_fallocate,
2569         .unlocked_ioctl = btrfs_ioctl,
2570 #ifdef CONFIG_COMPAT
2571         .compat_ioctl   = btrfs_ioctl,
2572 #endif
2573 };
2574
2575 void btrfs_auto_defrag_exit(void)
2576 {
2577         if (btrfs_inode_defrag_cachep)
2578                 kmem_cache_destroy(btrfs_inode_defrag_cachep);
2579 }
2580
2581 int btrfs_auto_defrag_init(void)
2582 {
2583         btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
2584                                         sizeof(struct inode_defrag), 0,
2585                                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2586                                         NULL);
2587         if (!btrfs_inode_defrag_cachep)
2588                 return -ENOMEM;
2589
2590         return 0;
2591 }