/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
#include "volumes.h"

/*
 * when auto defrag is enabled we queue up these defrag structs
 * to remember which inodes need defragging passes
 */
struct inode_defrag {
        struct rb_node rb_node;
        /* objectid */
        u64 ino;
        /*
         * transid where the defrag was added, we search for
         * extents newer than this
         */
        u64 transid;

        /* root objectid */
        u64 root;

        /* last offset we were able to defrag */
        u64 last_offset;

        /* if we've wrapped around back to zero once already */
        int cycled;
};

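/*
 * defrag records are keyed by (root objectid, inode number): compare
 * the roots first and fall back to the inode numbers on a tie
 */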
static int __compare_inode_defrag(struct inode_defrag *defrag1,
                                  struct inode_defrag *defrag2)
{
        if (defrag1->root > defrag2->root)
                return 1;
        else if (defrag1->root < defrag2->root)
                return -1;
        else if (defrag1->ino > defrag2->ino)
                return 1;
        else if (defrag1->ino < defrag2->ino)
                return -1;
        else
                return 0;
}

/* insert a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static void __btrfs_add_inode_defrag(struct inode *inode,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        int ret;

        p = &root->fs_info->defrag_inodes.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(defrag, entry);
                if (ret < 0)
                        p = &parent->rb_left;
                else if (ret > 0)
                        p = &parent->rb_right;
                else {
                        /* if we're reinserting an entry for
                         * an old defrag run, make sure to
                         * lower the transid of our existing record
                         */
                        if (defrag->transid < entry->transid)
                                entry->transid = defrag->transid;
                        if (defrag->last_offset > entry->last_offset)
                                entry->last_offset = defrag->last_offset;
                        goto exists;
                }
        }
        set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
        return;

exists:
        kfree(defrag);
        return;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
                           struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *defrag;
        u64 transid;

        if (!btrfs_test_opt(root, AUTO_DEFRAG))
                return 0;

        if (btrfs_fs_closing(root->fs_info))
                return 0;

        if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                return 0;

        if (trans)
                transid = trans->transid;
        else
                transid = BTRFS_I(inode)->root->last_trans;

        defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
        if (!defrag)
                return -ENOMEM;

        defrag->ino = btrfs_ino(inode);
        defrag->transid = transid;
        defrag->root = root->root_key.objectid;

        spin_lock(&root->fs_info->defrag_inodes_lock);
        if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                __btrfs_add_inode_defrag(inode, defrag);
        else
                kfree(defrag);
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        return 0;
}

/*
 * must be called with the defrag_inodes lock held; returns the matching
 * record, or NULL while setting *next to the next record in the tree
 * (if any)
 */
struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
                                             u64 root, u64 ino,
                                             struct rb_node **next)
{
        struct inode_defrag *entry = NULL;
        struct inode_defrag tmp;
        struct rb_node *p;
        struct rb_node *parent = NULL;
        int ret;

        tmp.ino = ino;
        tmp.root = root;

        p = info->defrag_inodes.rb_node;
        while (p) {
                parent = p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(&tmp, entry);
                if (ret < 0)
                        p = parent->rb_left;
                else if (ret > 0)
                        p = parent->rb_right;
                else
                        return entry;
        }

        if (next) {
                while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
                        parent = rb_next(parent);
                        entry = rb_entry(parent, struct inode_defrag, rb_node);
                }
                *next = parent;
        }
        return NULL;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        struct btrfs_root *inode_root;
        struct inode *inode;
        struct rb_node *n;
        struct btrfs_key key;
        struct btrfs_ioctl_defrag_range_args range;
        u64 first_ino = 0;
        u64 root_objectid = 0;
        int num_defrag;
        int defrag_batch = 1024;

        memset(&range, 0, sizeof(range));
        range.len = (u64)-1;

        atomic_inc(&fs_info->defrag_running);
        spin_lock(&fs_info->defrag_inodes_lock);
        while (1) {
                n = NULL;

                /* find an inode to defrag */
                defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
                                                 first_ino, &n);
                if (!defrag) {
                        if (n) {
                                defrag = rb_entry(n, struct inode_defrag,
                                                  rb_node);
                        } else if (root_objectid || first_ino) {
                                root_objectid = 0;
                                first_ino = 0;
                                continue;
                        } else {
                                break;
                        }
                }

                /* remove it from the rbtree */
                first_ino = defrag->ino + 1;
                root_objectid = defrag->root;
                rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);

                if (btrfs_fs_closing(fs_info))
                        goto next_free;

                spin_unlock(&fs_info->defrag_inodes_lock);

                /* get the inode */
                key.objectid = defrag->root;
                btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
                key.offset = (u64)-1;
                inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
                if (IS_ERR(inode_root))
                        goto next;

                key.objectid = defrag->ino;
                btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
                key.offset = 0;

                inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
                if (IS_ERR(inode))
                        goto next;

                /* do a chunk of defrag */
                clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
                range.start = defrag->last_offset;
                num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                               defrag_batch);
                /*
                 * if we filled the whole defrag batch, there
                 * must be more work to do.  Queue this defrag
                 * again
                 */
                if (num_defrag == defrag_batch) {
                        defrag->last_offset = range.start;
                        /*
                         * __btrfs_add_inode_defrag expects the lock to be
                         * held, and we dropped it before defragging
                         */
                        spin_lock(&fs_info->defrag_inodes_lock);
                        __btrfs_add_inode_defrag(inode, defrag);
                        spin_unlock(&fs_info->defrag_inodes_lock);
                        /*
                         * we don't want to kfree defrag, we added it back to
                         * the rbtree
                         */
                        defrag = NULL;
                } else if (defrag->last_offset && !defrag->cycled) {
                        /*
                         * we didn't fill our defrag batch, but
                         * we didn't start at zero.  Make sure we loop
                         * around to the start of the file.
                         */
                        defrag->last_offset = 0;
                        defrag->cycled = 1;
                        spin_lock(&fs_info->defrag_inodes_lock);
                        __btrfs_add_inode_defrag(inode, defrag);
                        spin_unlock(&fs_info->defrag_inodes_lock);
                        defrag = NULL;
                }

                iput(inode);
next:
                spin_lock(&fs_info->defrag_inodes_lock);
next_free:
                kfree(defrag);
        }
        spin_unlock(&fs_info->defrag_inodes_lock);

        atomic_dec(&fs_info->defrag_running);

        /*
         * during unmount, we use the transaction_wait queue to
         * wait for the defragger to stop
         */
        wake_up(&fs_info->transaction_wait);
        return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                                         size_t write_bytes,
                                         struct page **prepared_pages,
                                         struct iov_iter *i)
{
        size_t copied = 0;
        size_t total_copied = 0;
        int pg = 0;
        int offset = pos & (PAGE_CACHE_SIZE - 1);

        while (write_bytes > 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                 *
                 * Disable pagefault to avoid recursive lock since
                 * the pages are already locked
                 */
                pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
                pagefault_enable();

                /* Flush processor's dcache for this page */
                flush_dcache_page(page);

                /*
                 * if we get a partial write, we can end up with
                 * partially up to date pages.  These add
                 * a lot of complexity, so make sure they don't
                 * happen by forcing this copy to be retried.
                 *
                 * The rest of the btrfs_file_write code will fall
                 * back to page at a time copies after we return 0.
                 */
                if (!PageUptodate(page) && copied < count)
                        copied = 0;

                iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;

                /* Return to btrfs_file_aio_write to fault page */
                if (unlikely(copied == 0))
                        break;

                if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
                        offset += copied;
                } else {
                        pg++;
                        offset = 0;
                }
        }
        return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                /* page checked is some magic around finding pages that
                 * have been modified without going through
                 * btrfs_set_page_dirty.  Clear it here.
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                mark_page_accessed(pages[i]);
                page_cache_release(pages[i]);
        }
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
                      struct page **pages, size_t num_pages,
                      loff_t pos, size_t write_bytes,
                      struct extent_state **cached)
{
        int err = 0;
        int i;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

        start_pos = pos & ~((u64)root->sectorsize - 1);
        num_bytes = (write_bytes + pos - start_pos +
                    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        cached);
        if (err)
                return err;

        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }

        /*
         * we've only changed i_size in ram, and we haven't updated
         * the disk i_size.  There is no need to log the inode
         * at this time.
         */
        if (end_pos > isize)
                i_size_write(inode, end_pos);
        return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                             int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 len = end - start + 1;
        u64 gen;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
        while (1) {
                int no_splits = 0;

                if (!split)
                        split = alloc_extent_map();
                if (!split2)
                        split2 = alloc_extent_map();
                if (!split || !split2)
                        no_splits = 1;

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                gen = em->generation;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                remove_extent_mapping(em_tree, em);
                if (no_splits)
                        goto next;

                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;
                        split->orig_start = em->orig_start;
                        split->block_start = em->block_start;

                        if (compressed)
                                split->block_len = em->block_len;
                        else
                                split->block_len = split->len;
                        split->generation = gen;
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret); /* Logic error */
                        list_move(&split->list, &em_tree->modified_extents);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        split->generation = gen;

                        if (compressed) {
                                split->block_len = em->block_len;
                                split->block_start = em->block_start;
                                split->orig_start = em->orig_start;
                        } else {
                                split->block_len = split->len;
                                split->block_start = em->block_start + diff;
                                split->orig_start = split->start;
                        }

                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret); /* Logic error */
                        list_move(&split->list, &em_tree->modified_extents);
                        free_extent_map(split);
                        split = NULL;
                }
next:
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root, struct inode *inode,
                         struct btrfs_path *path, u64 start, u64 end,
                         u64 *drop_end, int drop_cache)
{
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 ino = btrfs_ino(inode);
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;
        int modify_tree = -1;
        int update_refs = (root->ref_cows || root == root->fs_info->tree_root);

        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

        if (start >= BTRFS_I(inode)->disk_i_size)
                modify_tree = 0;

        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               search_start, modify_tree);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid > ino ||
                    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_inline_len(leaf, fi);
                } else {
                        WARN_ON(1);
                        extent_end = search_start;
                }

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                search_start = max(key.offset, start);
                if (recow || !modify_tree) {
                        modify_tree = -1;
                        btrfs_release_path(path);
                        continue;
                }

                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                start - extent_offset, 0);
                                BUG_ON(ret); /* -ENOMEM */
                        }
                        key.offset = start;
                }
                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (start <= key.offset && end < extent_end) {
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, end - key.offset);
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, extent_end - start);
                        if (end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (start <= key.offset && end >= extent_end) {
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (update_refs &&
                            extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                extent_end = ALIGN(extent_end,
                                                   root->sectorsize);
                        } else if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_free_extent(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
                                                extent_offset, 0);
                                BUG_ON(ret); /* -ENOMEM */
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                        }

                        if (end == extent_end)
                                break;

                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        if (ret) {
                                btrfs_abort_transaction(trans, root, ret);
                                break;
                        }

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(path);
                        continue;
                }

                BUG_ON(1);
        }

        if (!ret && del_nr > 0) {
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret)
                        btrfs_abort_transaction(trans, root, ret);
        }

        if (drop_end)
                *drop_end = min(end, extent_end);
        btrfs_release_path(path);
        return ret;
}

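/*
 * convenience wrapper around __btrfs_drop_extents that allocates and
 * frees its own path
 */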
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode, u64 start,
                       u64 end, int drop_cache)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
                                   drop_cache);
        btrfs_free_path(path);
        return ret;
}

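/*
 * check whether the file extent item at @slot is a plain REG extent
 * (no compression, encryption or other encoding) backed by @bytenr and
 * contiguous with the caller's extent; on success the item's bounds are
 * returned through @start and @end
 */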
static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}

/*
 * Mark the extent in the range start - end as written.
 *
 * This changes the extent type from 'pre-allocated' to 'regular'. If only
 * part of the extent is marked as written, the extent will be split into
 * two or three pieces.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        recow = 0;
        split = start;
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
               BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(path);
                        goto again;
                }
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
                                           ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */

                if (split == start) {
                        key.offset = start;
                } else {
                        BUG_ON(start != key.offset);
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }
        }
out:
        btrfs_free_path(path);
        return 0;
}

/*
 * on error we return an unlocked page and the error value;
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
                                 bool force_uptodate)
{
        int ret = 0;

        if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
            !PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                if (ret)
                        return ret;
                lock_page(page);
                if (!PageUptodate(page)) {
                        unlock_page(page);
                        return -EIO;
                }
        }
        return 0;
}

/*
 * this gets pages into the page cache and locks them down; it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
                         struct page **pages, size_t num_pages,
                         loff_t pos, unsigned long first_index,
                         size_t write_bytes, bool force_uptodate)
{
        struct extent_state *cached_state = NULL;
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = fdentry(file)->d_inode;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int err = 0;
        int faili = 0;
        u64 start_pos;
        u64 last_pos;

        start_pos = pos & ~((u64)root->sectorsize - 1);
        last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = find_or_create_page(inode->i_mapping, index + i,
                                               mask | __GFP_WRITE);
                if (!pages[i]) {
                        faili = i - 1;
                        err = -ENOMEM;
                        goto fail;
                }

                if (i == 0)
                        err = prepare_uptodate_page(pages[i], pos,
                                                    force_uptodate);
                if (i == num_pages - 1)
                        err = prepare_uptodate_page(pages[i],
                                                    pos + write_bytes, false);
                if (err) {
                        page_cache_release(pages[i]);
                        faili = i - 1;
                        goto fail;
                }
                wait_on_page_writeback(pages[i]);
        }
        err = 0;
        /*
         * if we're writing inside the current i_size, make sure there are
         * no ordered extents (pending writeback) covering the range; if
         * there are, drop our page locks, wait for them to finish and retry
         */
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
                lock_extent_bits(&BTRFS_I(inode)->io_tree,
                                 start_pos, last_pos - 1, 0, &cached_state);
                ordered = btrfs_lookup_first_ordered_extent(inode,
                                                            last_pos - 1);
                if (ordered &&
                    ordered->file_offset + ordered->len > start_pos &&
                    ordered->file_offset < last_pos) {
                        btrfs_put_ordered_extent(ordered);
                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                             start_pos, last_pos - 1,
                                             &cached_state, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
                                page_cache_release(pages[i]);
                        }
                        btrfs_wait_ordered_range(inode, start_pos,
                                                 last_pos - start_pos);
                        goto again;
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);

                clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
                                  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
                                  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                                  0, 0, &cached_state, GFP_NOFS);
                unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                     start_pos, last_pos - 1, &cached_state,
                                     GFP_NOFS);
        }
        for (i = 0; i < num_pages; i++) {
                if (clear_page_dirty_for_io(pages[i]))
                        account_page_redirty(pages[i]);
                set_page_extent_mapped(pages[i]);
                WARN_ON(!PageLocked(pages[i]));
        }
        return 0;
fail:
        while (faili >= 0) {
                unlock_page(pages[faili]);
                page_cache_release(pages[faili]);
                faili--;
        }
        return err;
}

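/*
 * the buffered write loop: reserve delalloc space, prepare and lock a
 * batch of pages, copy user data into them and mark them dirty, backing
 * off to smaller batches when the copy faults
 */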
static noinline ssize_t __btrfs_buffered_write(struct file *file,
                                               struct iov_iter *i,
                                               loff_t pos)
{
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct page **pages = NULL;
        unsigned long first_index;
        size_t num_written = 0;
        int nrptrs;
        int ret = 0;
        bool force_page_uptodate = false;

        nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
                     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
                     (sizeof(struct page *)));
        nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
        nrptrs = max(nrptrs, 8);
        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        first_index = pos >> PAGE_CACHE_SHIFT;

        while (iov_iter_count(i) > 0) {
                size_t offset = pos & (PAGE_CACHE_SIZE - 1);
                size_t write_bytes = min(iov_iter_count(i),
                                         nrptrs * (size_t)PAGE_CACHE_SIZE -
                                         offset);
                size_t num_pages = (write_bytes + offset +
                                    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
                size_t dirty_pages;
                size_t copied;

                WARN_ON(num_pages > nrptrs);

                /*
                 * Fault pages before locking them in prepare_pages
                 * to avoid recursive lock
                 */
                if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
                        ret = -EFAULT;
                        break;
                }

                ret = btrfs_delalloc_reserve_space(inode,
                                        num_pages << PAGE_CACHE_SHIFT);
                if (ret)
                        break;

                /*
                 * This is going to setup the pages array with the number of
                 * pages we want, so we don't really need to worry about the
                 * contents of pages from loop to loop
                 */
                ret = prepare_pages(root, file, pages, num_pages,
                                    pos, first_index, write_bytes,
                                    force_page_uptodate);
                if (ret) {
                        btrfs_delalloc_release_space(inode,
                                        num_pages << PAGE_CACHE_SHIFT);
                        break;
                }

                copied = btrfs_copy_from_user(pos, num_pages,
                                           write_bytes, pages, i);

                /*
                 * if we have trouble faulting in the pages, fall
                 * back to one page at a time
                 */
                if (copied < write_bytes)
                        nrptrs = 1;

                if (copied == 0) {
                        force_page_uptodate = true;
                        dirty_pages = 0;
                } else {
                        force_page_uptodate = false;
                        dirty_pages = (copied + offset +
                                       PAGE_CACHE_SIZE - 1) >>
                                       PAGE_CACHE_SHIFT;
                }

                /*
                 * If we had a short copy we need to release the excess delalloc
                 * bytes we reserved.  We need to increment outstanding_extents
                 * because btrfs_delalloc_release_space will decrement it, but
                 * we still have an outstanding extent for the chunk we actually
                 * managed to copy.
                 */
                if (num_pages > dirty_pages) {
                        if (copied > 0) {
                                spin_lock(&BTRFS_I(inode)->lock);
                                BTRFS_I(inode)->outstanding_extents++;
                                spin_unlock(&BTRFS_I(inode)->lock);
                        }
                        btrfs_delalloc_release_space(inode,
                                        (num_pages - dirty_pages) <<
                                        PAGE_CACHE_SHIFT);
                }

                if (copied > 0) {
                        ret = btrfs_dirty_pages(root, inode, pages,
                                                dirty_pages, pos, copied,
                                                NULL);
                        if (ret) {
                                btrfs_delalloc_release_space(inode,
                                        dirty_pages << PAGE_CACHE_SHIFT);
                                btrfs_drop_pages(pages, num_pages);
                                break;
                        }
                }

                btrfs_drop_pages(pages, num_pages);

                cond_resched();

                balance_dirty_pages_ratelimited_nr(inode->i_mapping,
                                                   dirty_pages);
                if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
                        btrfs_btree_balance_dirty(root, 1);

                pos += copied;
                num_written += copied;
        }

        kfree(pages);

        return num_written ? num_written : ret;
}

1361 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1362                                     const struct iovec *iov,
1363                                     unsigned long nr_segs, loff_t pos,
1364                                     loff_t *ppos, size_t count, size_t ocount)
1365 {
1366         struct file *file = iocb->ki_filp;
1367         struct iov_iter i;
1368         ssize_t written;
1369         ssize_t written_buffered;
1370         loff_t endbyte;
1371         int err;
1372
1373         written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
1374                                             count, ocount);
1375
1376         if (written < 0 || written == count)
1377                 return written;
1378
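        /*
         * The O_DIRECT write stopped short; fall back to buffered
         * writes for the remaining bytes, then flush and invalidate
         * the page cache so the result still looks like a direct
         * write.
         */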
1379         pos += written;
1380         count -= written;
1381         iov_iter_init(&i, iov, nr_segs, count, written);
1382         written_buffered = __btrfs_buffered_write(file, &i, pos);
1383         if (written_buffered < 0) {
1384                 err = written_buffered;
1385                 goto out;
1386         }
1387         endbyte = pos + written_buffered - 1;
1388         err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
1389         if (err)
1390                 goto out;
1391         written += written_buffered;
1392         *ppos = pos + written_buffered;
1393         invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
1394                                  endbyte >> PAGE_CACHE_SHIFT);
1395 out:
1396         return written ? written : err;
1397 }
1398
1399 static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1400                                     const struct iovec *iov,
1401                                     unsigned long nr_segs, loff_t pos)
1402 {
1403         struct file *file = iocb->ki_filp;
1404         struct inode *inode = fdentry(file)->d_inode;
1405         struct btrfs_root *root = BTRFS_I(inode)->root;
1406         loff_t *ppos = &iocb->ki_pos;
1407         u64 start_pos;
1408         ssize_t num_written = 0;
1409         ssize_t err = 0;
1410         size_t count, ocount;
1411
1412         sb_start_write(inode->i_sb);
1413
1414         mutex_lock(&inode->i_mutex);
1415
1416         err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
1417         if (err) {
1418                 mutex_unlock(&inode->i_mutex);
1419                 goto out;
1420         }
1421         count = ocount;
1422
1423         current->backing_dev_info = inode->i_mapping->backing_dev_info;
1424         err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1425         if (err) {
1426                 mutex_unlock(&inode->i_mutex);
1427                 goto out;
1428         }
1429
1430         if (count == 0) {
1431                 mutex_unlock(&inode->i_mutex);
1432                 goto out;
1433         }
1434
1435         err = file_remove_suid(file);
1436         if (err) {
1437                 mutex_unlock(&inode->i_mutex);
1438                 goto out;
1439         }
1440
1441         /*
1442          * If BTRFS flips to read-only due to an unrecoverable error
1443          * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR), we have
1444          * to stop this write operation to ensure FS consistency, even
1445          * though we opened the file as writable.
1446          */
1447         if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
1448                 mutex_unlock(&inode->i_mutex);
1449                 err = -EROFS;
1450                 goto out;
1451         }
1452
1453         err = file_update_time(file);
1454         if (err) {
1455                 mutex_unlock(&inode->i_mutex);
1456                 goto out;
1457         }
1458
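        /*
         * If the write starts beyond the current EOF, expand the file
         * out to the (sector-aligned) start of the write first, so the
         * range in between becomes a proper hole.
         */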
1459         start_pos = round_down(pos, root->sectorsize);
1460         if (start_pos > i_size_read(inode)) {
1461                 err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
1462                 if (err) {
1463                         mutex_unlock(&inode->i_mutex);
1464                         goto out;
1465                 }
1466         }
1467
1468         if (unlikely(file->f_flags & O_DIRECT)) {
1469                 num_written = __btrfs_direct_write(iocb, iov, nr_segs,
1470                                                    pos, ppos, count, ocount);
1471         } else {
1472                 struct iov_iter i;
1473
1474                 iov_iter_init(&i, iov, nr_segs, count, num_written);
1475
1476                 num_written = __btrfs_buffered_write(file, &i, pos);
1477                 if (num_written > 0)
1478                         *ppos = pos + num_written;
1479         }
1480
1481         mutex_unlock(&inode->i_mutex);
1482
1483         /*
1484          * We want to make sure fsync finds this change, but we
1485          * haven't joined a running transaction right now.
1486          *
1487          * Later on, someone is sure to update the inode and get the
1488          * real transid recorded.
1489          *
1490          * We set last_trans now to the fs_info generation + 1,
1491          * this will either be one more than the running transaction
1492          * or the generation used for the next transaction if there isn't
1493          * one running right now.
1494          */
1495         BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
1496         if (num_written > 0 || num_written == -EIOCBQUEUED) {
1497                 err = generic_write_sync(file, pos, num_written);
1498                 if (err < 0 && num_written > 0)
1499                         num_written = err;
1500         }
1501 out:
1502         sb_end_write(inode->i_sb);
1503         current->backing_dev_info = NULL;
1504         return num_written ? num_written : err;
1505 }
1506
1507 int btrfs_release_file(struct inode *inode, struct file *filp)
1508 {
1509         /*
1510          * ordered_data_close is set by setattr when we are about to truncate
1511          * a file from a non-zero size to a zero size.  This tries to
1512          * flush down new bytes that may have been written if the
1513          * application were using truncate to replace a file in place.
1514          */
1515         if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1516                                &BTRFS_I(inode)->runtime_flags)) {
1517                 btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
1518                 if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
1519                         filemap_flush(inode->i_mapping);
1520         }
1521         if (filp->private_data)
1522                 btrfs_ioctl_trans_end(filp);
1523         return 0;
1524 }
1525
1526 /*
1527  * fsync call for both files and directories.  This logs the inode into
1528  * the tree log instead of forcing full commits whenever possible.
1529  *
1530  * It needs to call filemap_fdatawait so that all ordered extent updates
1531  * in the metadata btree are up to date for copying to the log.
1532  *
1533  * It drops the inode mutex before doing the tree log commit.  This is an
1534  * important optimization for directories because holding the mutex prevents
1535  * new operations on the dir while we write to disk.
1536  */
1537 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1538 {
1539         struct dentry *dentry = file->f_path.dentry;
1540         struct inode *inode = dentry->d_inode;
1541         struct btrfs_root *root = BTRFS_I(inode)->root;
1542         int ret = 0;
1543         struct btrfs_trans_handle *trans;
1544
1545         trace_btrfs_sync_file(file, datasync);
1546
1547         /*
1548          * We write the dirty pages in the range and wait until they
1549          * complete outside of the ->i_mutex, so that multiple tasks
1550          * can flush dirty pages concurrently and improve performance.
1551          */
1552         ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
1553         if (ret)
1554                 return ret;
1555
1556         mutex_lock(&inode->i_mutex);
1557
1558         /*
1559          * We flush the dirty pages again to make sure no dirty pages
1560          * in the range are left behind.
1561          */
1562         atomic_inc(&root->log_batch);
1563         btrfs_wait_ordered_range(inode, start, end);
1564         atomic_inc(&root->log_batch);
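        /*
         * The log_batch increments bracket the ordered extent wait so
         * that btrfs_sync_log can notice concurrent fsync activity and
         * batch multiple fsyncs into a single tree-log commit.
         */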
1565
1566         /*
1567          * check the transaction that last modified this inode
1568          * and see if it's already been committed
1569          */
1570         if (!BTRFS_I(inode)->last_trans) {
1571                 mutex_unlock(&inode->i_mutex);
1572                 goto out;
1573         }
1574
1575         /*
1576          * if the last transaction that changed this file was before
1577          * the current transaction, we can bail out now without any
1578          * syncing
1579          */
1580         smp_mb();
1581         if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
1582             BTRFS_I(inode)->last_trans <=
1583             root->fs_info->last_trans_committed) {
1584                 BTRFS_I(inode)->last_trans = 0;
1585
1586                 /*
1587                  * We've had everything committed since the last time we were
1588                  * modified so clear this flag in case it was set for whatever
1589                  * reason, it's no longer relevant.
1590                  */
1591                 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1592                           &BTRFS_I(inode)->runtime_flags);
1593                 mutex_unlock(&inode->i_mutex);
1594                 goto out;
1595         }
1596
1597         /*
1598          * ok, we haven't committed the transaction yet, let's do a commit
1599          */
1600         if (file->private_data)
1601                 btrfs_ioctl_trans_end(file);
1602
1603         trans = btrfs_start_transaction(root, 0);
1604         if (IS_ERR(trans)) {
1605                 ret = PTR_ERR(trans);
1606                 mutex_unlock(&inode->i_mutex);
1607                 goto out;
1608         }
1609
1610         ret = btrfs_log_dentry_safe(trans, root, dentry);
1611         if (ret < 0) {
1612                 mutex_unlock(&inode->i_mutex);
1613                 goto out;
1614         }
1615
1616         /* we've logged all the items and now have a consistent
1617          * version of the file in the log.  It is possible that
1618          * someone will come in and modify the file, but that's
1619          * fine because the log is consistent on disk, and we
1620          * have references to all of the file's extents
1621          *
1622          * It is possible that someone will come in and log the
1623          * file again, but that will end up using the synchronization
1624          * inside btrfs_sync_log to keep things safe.
1625          */
1626         mutex_unlock(&inode->i_mutex);
1627
1628         if (ret != BTRFS_NO_LOG_SYNC) {
1629                 if (ret > 0) {
1630                         ret = btrfs_commit_transaction(trans, root);
1631                 } else {
1632                         ret = btrfs_sync_log(trans, root);
1633                         if (ret == 0)
1634                                 ret = btrfs_end_transaction(trans, root);
1635                         else
1636                                 ret = btrfs_commit_transaction(trans, root);
1637                 }
1638         } else {
1639                 ret = btrfs_end_transaction(trans, root);
1640         }
1641 out:
1642         return ret > 0 ? -EIO : ret;
1643 }
1644
1645 static const struct vm_operations_struct btrfs_file_vm_ops = {
1646         .fault          = filemap_fault,
1647         .page_mkwrite   = btrfs_page_mkwrite,
1648 };
1649
1650 static int btrfs_file_mmap(struct file  *filp, struct vm_area_struct *vma)
1651 {
1652         struct address_space *mapping = filp->f_mapping;
1653
1654         if (!mapping->a_ops->readpage)
1655                 return -ENOEXEC;
1656
1657         file_accessed(filp);
1658         vma->vm_ops = &btrfs_file_vm_ops;
1659         vma->vm_flags |= VM_CAN_NONLINEAR;
1660
1661         return 0;
1662 }
1663
1664 static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
1665                           int slot, u64 start, u64 end)
1666 {
1667         struct btrfs_file_extent_item *fi;
1668         struct btrfs_key key;
1669
1670         if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1671                 return 0;
1672
1673         btrfs_item_key_to_cpu(leaf, &key, slot);
1674         if (key.objectid != btrfs_ino(inode) ||
1675             key.type != BTRFS_EXTENT_DATA_KEY)
1676                 return 0;
1677
1678         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1679
1680         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
1681                 return 0;
1682
1683         if (btrfs_file_extent_disk_bytenr(leaf, fi))
1684                 return 0;
1685
1686         if (key.offset == end)
1687                 return 1;
1688         if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
1689                 return 1;
1690         return 0;
1691 }
1692
1693 static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
1694                       struct btrfs_path *path, u64 offset, u64 end)
1695 {
1696         struct btrfs_root *root = BTRFS_I(inode)->root;
1697         struct extent_buffer *leaf;
1698         struct btrfs_file_extent_item *fi;
1699         struct extent_map *hole_em;
1700         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1701         struct btrfs_key key;
1702         int ret;
1703
1704         key.objectid = btrfs_ino(inode);
1705         key.type = BTRFS_EXTENT_DATA_KEY;
1706         key.offset = offset;
1707
1708
1709         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1710         if (ret < 0)
1711                 return ret;
1712         BUG_ON(!ret);
1713
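        /*
         * Prefer extending an adjacent hole extent over inserting a
         * brand new one: first try the extent that ends at @offset,
         * then the one that starts at @end.
         */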
1714         leaf = path->nodes[0];
1715         if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
1716                 u64 num_bytes;
1717
1718                 path->slots[0]--;
1719                 fi = btrfs_item_ptr(leaf, path->slots[0],
1720                                     struct btrfs_file_extent_item);
1721                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
1722                         end - offset;
1723                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1724                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1725                 btrfs_set_file_extent_offset(leaf, fi, 0);
1726                 btrfs_mark_buffer_dirty(leaf);
1727                 goto out;
1728         }
1729
1730         if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
1731                 u64 num_bytes;
1732
1733                 path->slots[0]++;
1734                 key.offset = offset;
1735                 btrfs_set_item_key_safe(trans, root, path, &key);
1736                 fi = btrfs_item_ptr(leaf, path->slots[0],
1737                                     struct btrfs_file_extent_item);
1738                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
1739                         offset;
1740                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1741                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1742                 btrfs_set_file_extent_offset(leaf, fi, 0);
1743                 btrfs_mark_buffer_dirty(leaf);
1744                 goto out;
1745         }
1746         btrfs_release_path(path);
1747
1748         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
1749                                        0, 0, end - offset, 0, end - offset,
1750                                        0, 0, 0);
1751         if (ret)
1752                 return ret;
1753
1754 out:
1755         btrfs_release_path(path);
1756
1757         hole_em = alloc_extent_map();
1758         if (!hole_em) {
1759                 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1760                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1761                         &BTRFS_I(inode)->runtime_flags);
1762         } else {
1763                 hole_em->start = offset;
1764                 hole_em->len = end - offset;
1765                 hole_em->orig_start = offset;
1766
1767                 hole_em->block_start = EXTENT_MAP_HOLE;
1768                 hole_em->block_len = 0;
1769                 hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
1770                 hole_em->compress_type = BTRFS_COMPRESS_NONE;
1771                 hole_em->generation = trans->transid;
1772
1773                 do {
1774                         btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1775                         write_lock(&em_tree->lock);
1776                         ret = add_extent_mapping(em_tree, hole_em);
1777                         if (!ret)
1778                                 list_move(&hole_em->list,
1779                                           &em_tree->modified_extents);
1780                         write_unlock(&em_tree->lock);
1781                 } while (ret == -EEXIST);
1782                 free_extent_map(hole_em);
1783                 if (ret)
1784                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1785                                 &BTRFS_I(inode)->runtime_flags);
1786         }
1787
1788         return 0;
1789 }
1790
1791 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
1792 {
1793         struct btrfs_root *root = BTRFS_I(inode)->root;
1794         struct extent_state *cached_state = NULL;
1795         struct btrfs_path *path;
1796         struct btrfs_block_rsv *rsv;
1797         struct btrfs_trans_handle *trans;
1798         u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
1799         u64 lockstart = (offset + mask) & ~mask;
1800         u64 lockend = ((offset + len) & ~mask) - 1;
1801         u64 cur_offset = lockstart;
1802         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
1803         u64 drop_end;
1804         unsigned long nr;
1805         int ret = 0;
1806         int err = 0;
1807         bool same_page = (offset >> PAGE_CACHE_SHIFT) ==
1808                 ((offset + len) >> PAGE_CACHE_SHIFT);
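        /*
         * Example with 4K sectors: punching offset=1000, len=8000
         * yields lockstart=4096 and lockend=8191, so only whole
         * sectors inside the range have their extents dropped; the
         * partial first and last pages are zeroed with
         * btrfs_truncate_page() below.
         */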
1809
1810         btrfs_wait_ordered_range(inode, offset, len);
1811
1812         mutex_lock(&inode->i_mutex);
1813         if (offset >= inode->i_size) {
1814                 mutex_unlock(&inode->i_mutex);
1815                 return 0;
1816         }
1817
1818         /*
1819          * If the hole starts and ends within a single page, and does
1820          * not cover the entire page, we only need to zero part of it.
1821          */
1822         if (same_page && len < PAGE_CACHE_SIZE) {
1823                 ret = btrfs_truncate_page(inode, offset, len, 0);
1824                 mutex_unlock(&inode->i_mutex);
1825                 return ret;
1826         }
1827
1828         /* zero the back part of the first page */
1829         ret = btrfs_truncate_page(inode, offset, 0, 0);
1830         if (ret) {
1831                 mutex_unlock(&inode->i_mutex);
1832                 return ret;
1833         }
1834
1835         /* zero the front part of the last page */
1836         ret = btrfs_truncate_page(inode, offset + len, 0, 1);
1837         if (ret) {
1838                 mutex_unlock(&inode->i_mutex);
1839                 return ret;
1840         }
1841
1842         if (lockend < lockstart) {
1843                 mutex_unlock(&inode->i_mutex);
1844                 return 0;
1845         }
1846
1847         while (1) {
1848                 struct btrfs_ordered_extent *ordered;
1849
1850                 truncate_pagecache_range(inode, lockstart, lockend);
1851
1852                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1853                                  0, &cached_state);
1854                 ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
1855
1856                 /*
1857                  * We need to make sure we have no ordered extents in this
1858                  * range and that nobody raced in and read a page in this
1859                  * range; if they did, we need to try again.
1860                  */
1861                 if ((!ordered ||
1862                     (ordered->file_offset + ordered->len < lockstart ||
1863                      ordered->file_offset > lockend)) &&
1864                      !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
1865                                      lockend, EXTENT_UPTODATE, 0,
1866                                      cached_state)) {
1867                         if (ordered)
1868                                 btrfs_put_ordered_extent(ordered);
1869                         break;
1870                 }
1871                 if (ordered)
1872                         btrfs_put_ordered_extent(ordered);
1873                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
1874                                      lockend, &cached_state, GFP_NOFS);
1875                 btrfs_wait_ordered_range(inode, lockstart,
1876                                          lockend - lockstart + 1);
1877         }
1878
1879         path = btrfs_alloc_path();
1880         if (!path) {
1881                 ret = -ENOMEM;
1882                 goto out;
1883         }
1884
1885         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
1886         if (!rsv) {
1887                 ret = -ENOMEM;
1888                 goto out_free;
1889         }
1890         rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
1891         rsv->failfast = 1;
1892
1893         /*
1894          * 1 - update the inode
1895          * 1 - remove the extents in the range
1896          * 1 - add the hole extent
1897          */
1898         trans = btrfs_start_transaction(root, 3);
1899         if (IS_ERR(trans)) {
1900                 err = PTR_ERR(trans);
1901                 goto out_free;
1902         }
1903
1904         ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
1905                                       min_size);
1906         BUG_ON(ret);
1907         trans->block_rsv = rsv;
1908
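        /*
         * Drop the extents in the range a chunk at a time.
         * __btrfs_drop_extents returns -ENOSPC once the reservation
         * runs low, with drop_end marking its progress, so we fill the
         * hole dropped so far, roll to a fresh transaction and go on.
         */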
1909         while (cur_offset < lockend) {
1910                 ret = __btrfs_drop_extents(trans, root, inode, path,
1911                                            cur_offset, lockend + 1,
1912                                            &drop_end, 1);
1913                 if (ret != -ENOSPC)
1914                         break;
1915
1916                 trans->block_rsv = &root->fs_info->trans_block_rsv;
1917
1918                 ret = fill_holes(trans, inode, path, cur_offset, drop_end);
1919                 if (ret) {
1920                         err = ret;
1921                         break;
1922                 }
1923
1924                 cur_offset = drop_end;
1925
1926                 ret = btrfs_update_inode(trans, root, inode);
1927                 if (ret) {
1928                         err = ret;
1929                         break;
1930                 }
1931
1932                 nr = trans->blocks_used;
1933                 btrfs_end_transaction(trans, root);
1934                 btrfs_btree_balance_dirty(root, nr);
1935
1936                 trans = btrfs_start_transaction(root, 3);
1937                 if (IS_ERR(trans)) {
1938                         ret = PTR_ERR(trans);
1939                         trans = NULL;
1940                         break;
1941                 }
1942
1943                 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
1944                                               rsv, min_size);
1945                 BUG_ON(ret);    /* shouldn't happen */
1946                 trans->block_rsv = rsv;
1947         }
1948
1949         if (ret) {
1950                 err = ret;
1951                 goto out_trans;
1952         }
1953
1954         trans->block_rsv = &root->fs_info->trans_block_rsv;
1955         ret = fill_holes(trans, inode, path, cur_offset, drop_end);
1956         if (ret) {
1957                 err = ret;
1958                 goto out_trans;
1959         }
1960
1961 out_trans:
1962         if (!trans)
1963                 goto out_free;
1964
1965         trans->block_rsv = &root->fs_info->trans_block_rsv;
1966         ret = btrfs_update_inode(trans, root, inode);
1967         nr = trans->blocks_used;
1968         btrfs_end_transaction(trans, root);
1969         btrfs_btree_balance_dirty(root, nr);
1970 out_free:
1971         btrfs_free_path(path);
1972         btrfs_free_block_rsv(root, rsv);
1973 out:
1974         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1975                              &cached_state, GFP_NOFS);
1976         mutex_unlock(&inode->i_mutex);
1977         if (ret && !err)
1978                 err = ret;
1979         return err;
1980 }
1981
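/*
 * A minimal userspace sketch of exercising the hole punching path
 * below (the file name is only an example):
 *
 *      int fd = open("/mnt/btrfs/file", O_RDWR);
 *      fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *                4096, 8192);
 *
 * The VFS requires FALLOC_FL_KEEP_SIZE to be set together with
 * FALLOC_FL_PUNCH_HOLE.
 */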
1982 static long btrfs_fallocate(struct file *file, int mode,
1983                             loff_t offset, loff_t len)
1984 {
1985         struct inode *inode = file->f_path.dentry->d_inode;
1986         struct extent_state *cached_state = NULL;
1987         u64 cur_offset;
1988         u64 last_byte;
1989         u64 alloc_start;
1990         u64 alloc_end;
1991         u64 alloc_hint = 0;
1992         u64 locked_end;
1993         u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
1994         struct extent_map *em;
1995         int ret;
1996
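        /*
         * Round the range outward to sector boundaries, e.g. with 4K
         * sectors an offset=1000, len=8000 request covers [0, 12288).
         */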
1997         alloc_start = offset & ~mask;
1998         alloc_end =  (offset + len + mask) & ~mask;
1999
2000         /* Make sure we aren't being given some crap mode */
2001         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2002                 return -EOPNOTSUPP;
2003
2004         if (mode & FALLOC_FL_PUNCH_HOLE)
2005                 return btrfs_punch_hole(inode, offset, len);
2006
2007         /*
2008          * Make sure we have enough space before we do the
2009          * allocation.
2010          */
2011         ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start + 1);
2012         if (ret)
2013                 return ret;
2014
2015         /*
2016          * wait for ordered IO before we have any locks.  We'll loop again
2017          * below with the locks held.
2018          */
2019         btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
2020
2021         mutex_lock(&inode->i_mutex);
2022         ret = inode_newsize_ok(inode, alloc_end);
2023         if (ret)
2024                 goto out;
2025
2026         if (alloc_start > inode->i_size) {
2027                 ret = btrfs_cont_expand(inode, i_size_read(inode),
2028                                         alloc_start);
2029                 if (ret)
2030                         goto out;
2031         }
2032
2033         locked_end = alloc_end - 1;
2034         while (1) {
2035                 struct btrfs_ordered_extent *ordered;
2036
2037                 /* the extent lock is ordered inside the running
2038                  * transaction
2039                  */
2040                 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
2041                                  locked_end, 0, &cached_state);
2042                 ordered = btrfs_lookup_first_ordered_extent(inode,
2043                                                             alloc_end - 1);
2044                 if (ordered &&
2045                     ordered->file_offset + ordered->len > alloc_start &&
2046                     ordered->file_offset < alloc_end) {
2047                         btrfs_put_ordered_extent(ordered);
2048                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
2049                                              alloc_start, locked_end,
2050                                              &cached_state, GFP_NOFS);
2051                         /*
2052                          * we can't wait on the range with the transaction
2053                          * running or with the extent lock held
2054                          */
2055                         btrfs_wait_ordered_range(inode, alloc_start,
2056                                                  alloc_end - alloc_start);
2057                 } else {
2058                         if (ordered)
2059                                 btrfs_put_ordered_extent(ordered);
2060                         break;
2061                 }
2062         }
2063
2064         cur_offset = alloc_start;
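        /*
         * Walk the extent maps across the range, preallocating every
         * hole (and every region past EOF that isn't already
         * preallocated) one extent at a time.
         */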
2065         while (1) {
2066                 u64 actual_end;
2067
2068                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2069                                       alloc_end - cur_offset, 0);
2070                 if (IS_ERR_OR_NULL(em)) {
2071                         if (!em)
2072                                 ret = -ENOMEM;
2073                         else
2074                                 ret = PTR_ERR(em);
2075                         break;
2076                 }
2077                 last_byte = min(extent_map_end(em), alloc_end);
2078                 actual_end = min_t(u64, extent_map_end(em), offset + len);
2079                 last_byte = (last_byte + mask) & ~mask;
2080
2081                 if (em->block_start == EXTENT_MAP_HOLE ||
2082                     (cur_offset >= inode->i_size &&
2083                      !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
2084                         ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
2085                                                         last_byte - cur_offset,
2086                                                         1 << inode->i_blkbits,
2087                                                         offset + len,
2088                                                         &alloc_hint);
2089
2090                         if (ret < 0) {
2091                                 free_extent_map(em);
2092                                 break;
2093                         }
2094                 } else if (actual_end > inode->i_size &&
2095                            !(mode & FALLOC_FL_KEEP_SIZE)) {
2096                         /*
2097                          * We didn't need to allocate any more space, but we
2098                          * still extended the size of the file so we need to
2099                          * update i_size.
2100                          */
2101                         inode->i_ctime = CURRENT_TIME;
2102                         i_size_write(inode, actual_end);
2103                         btrfs_ordered_update_i_size(inode, actual_end, NULL);
2104                 }
2105                 free_extent_map(em);
2106
2107                 cur_offset = last_byte;
2108                 if (cur_offset >= alloc_end) {
2109                         ret = 0;
2110                         break;
2111                 }
2112         }
2113         unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2114                              &cached_state, GFP_NOFS);
2115 out:
2116         mutex_unlock(&inode->i_mutex);
2117         /* Let go of our reservation. */
2118         btrfs_free_reserved_data_space(inode, alloc_end - alloc_start + 1);
2119         return ret;
2120 }
2121
2122 static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
2123 {
2124         struct btrfs_root *root = BTRFS_I(inode)->root;
2125         struct extent_map *em;
2126         struct extent_state *cached_state = NULL;
2127         u64 lockstart = *offset;
2128         u64 lockend = i_size_read(inode);
2129         u64 start = *offset;
2130         u64 orig_start = *offset;
2131         u64 len = i_size_read(inode);
2132         u64 last_end = 0;
2133         int ret = 0;
2134
2135         lockend = max_t(u64, root->sectorsize, lockend);
2136         if (lockend <= lockstart)
2137                 lockend = lockstart + root->sectorsize;
2138
2139         len = lockend - lockstart + 1;
2140
2141         len = max_t(u64, len, root->sectorsize);
2142         if (inode->i_size == 0)
2143                 return -ENXIO;
2144
2145         lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
2146                          &cached_state);
2147
2148         /*
2149          * Delalloc is such a pain.  If we have a hole with pending
2150          * delalloc for a portion of it, the lookup will return a hole
2151          * covering the entire range, since nothing has actually been
2152          * written yet.  To take care of that case we look for an extent
2153          * just before the position we want, in case there is outstanding
2154          * delalloc going on there.
2155          */
2156         if (origin == SEEK_HOLE && start != 0) {
2157                 if (start <= root->sectorsize)
2158                         em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
2159                                                      root->sectorsize, 0);
2160                 else
2161                         em = btrfs_get_extent_fiemap(inode, NULL, 0,
2162                                                      start - root->sectorsize,
2163                                                      root->sectorsize, 0);
2164                 if (IS_ERR(em)) {
2165                         ret = PTR_ERR(em);
2166                         goto out;
2167                 }
2168                 last_end = em->start + em->len;
2169                 if (em->block_start == EXTENT_MAP_DELALLOC)
2170                         last_end = min_t(u64, last_end, inode->i_size);
2171                 free_extent_map(em);
2172         }
2173
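        /*
         * Walk forward one extent map at a time until we find what we
         * are looking for: a hole for SEEK_HOLE, or anything that is
         * not a hole for SEEK_DATA.
         */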
2174         while (1) {
2175                 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2176                 if (IS_ERR(em)) {
2177                         ret = PTR_ERR(em);
2178                         break;
2179                 }
2180
2181                 if (em->block_start == EXTENT_MAP_HOLE) {
2182                         if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2183                                 if (last_end <= orig_start) {
2184                                         free_extent_map(em);
2185                                         ret = -ENXIO;
2186                                         break;
2187                                 }
2188                         }
2189
2190                         if (origin == SEEK_HOLE) {
2191                                 *offset = start;
2192                                 free_extent_map(em);
2193                                 break;
2194                         }
2195                 } else {
2196                         if (origin == SEEK_DATA) {
2197                                 if (em->block_start == EXTENT_MAP_DELALLOC) {
2198                                         if (start >= inode->i_size) {
2199                                                 free_extent_map(em);
2200                                                 ret = -ENXIO;
2201                                                 break;
2202                                         }
2203                                 }
2204
2205                                 *offset = start;
2206                                 free_extent_map(em);
2207                                 break;
2208                         }
2209                 }
2210
2211                 start = em->start + em->len;
2212                 last_end = em->start + em->len;
2213
2214                 if (em->block_start == EXTENT_MAP_DELALLOC)
2215                         last_end = min_t(u64, last_end, inode->i_size);
2216
2217                 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2218                         free_extent_map(em);
2219                         ret = -ENXIO;
2220                         break;
2221                 }
2222                 free_extent_map(em);
2223                 cond_resched();
2224         }
2225         if (!ret)
2226                 *offset = min(*offset, inode->i_size);
2227 out:
2228         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2229                              &cached_state, GFP_NOFS);
2230         return ret;
2231 }
2232
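/*
 * Sketch of the resulting SEEK_DATA/SEEK_HOLE semantics: for a file
 * with data in [0, 4096) followed by a hole,
 *
 *      lseek(fd, 0, SEEK_HOLE) returns 4096
 *      lseek(fd, 0, SEEK_DATA) returns 0
 */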
2233 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
2234 {
2235         struct inode *inode = file->f_mapping->host;
2236         int ret;
2237
2238         mutex_lock(&inode->i_mutex);
2239         switch (origin) {
2240         case SEEK_END:
2241         case SEEK_CUR:
2242                 offset = generic_file_llseek(file, offset, origin);
2243                 goto out;
2244         case SEEK_DATA:
2245         case SEEK_HOLE:
2246                 if (offset >= i_size_read(inode)) {
2247                         mutex_unlock(&inode->i_mutex);
2248                         return -ENXIO;
2249                 }
2250
2251                 ret = find_desired_extent(inode, &offset, origin);
2252                 if (ret) {
2253                         mutex_unlock(&inode->i_mutex);
2254                         return ret;
2255                 }
2256         }
2257
2258         if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
2259                 offset = -EINVAL;
2260                 goto out;
2261         }
2262         if (offset > inode->i_sb->s_maxbytes) {
2263                 offset = -EINVAL;
2264                 goto out;
2265         }
2266
2267         /* Special lock needed here? */
2268         if (offset != file->f_pos) {
2269                 file->f_pos = offset;
2270                 file->f_version = 0;
2271         }
2272 out:
2273         mutex_unlock(&inode->i_mutex);
2274         return offset;
2275 }
2276
2277 const struct file_operations btrfs_file_operations = {
2278         .llseek         = btrfs_file_llseek,
2279         .read           = do_sync_read,
2280         .write          = do_sync_write,
2281         .aio_read       = generic_file_aio_read,
2282         .splice_read    = generic_file_splice_read,
2283         .aio_write      = btrfs_file_aio_write,
2284         .mmap           = btrfs_file_mmap,
2285         .open           = generic_file_open,
2286         .release        = btrfs_release_file,
2287         .fsync          = btrfs_sync_file,
2288         .fallocate      = btrfs_fallocate,
2289         .unlocked_ioctl = btrfs_ioctl,
2290 #ifdef CONFIG_COMPAT
2291         .compat_ioctl   = btrfs_ioctl,
2292 #endif
2293 };