/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
#include "volumes.h"

/*
 * when auto defrag is enabled we queue up these defrag structs
 * to remember which inodes need defragging passes
 */
struct inode_defrag {
        struct rb_node rb_node;
        /* objectid */
        u64 ino;
        /*
         * transid where the defrag was added, we search for
         * extents newer than this
         */
        u64 transid;

        /* root objectid */
        u64 root;

        /* last offset we were able to defrag */
        u64 last_offset;

        /* if we've wrapped around back to zero once already */
        int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
                                  struct inode_defrag *defrag2)
{
        if (defrag1->root > defrag2->root)
                return 1;
        else if (defrag1->root < defrag2->root)
                return -1;
        else if (defrag1->ino > defrag2->ino)
                return 1;
        else if (defrag1->ino < defrag2->ino)
                return -1;
        else
                return 0;
}
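
/*
 * Note: this orders defrag records lexicographically by (root, ino),
 * so all records for one subvolume cluster together in the rbtree;
 * e.g. (root=5, ino=300) sorts before (root=6, ino=2).
 */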

/* insert a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found, the defrag item you
 * pass in is freed
 */
static void __btrfs_add_inode_defrag(struct inode *inode,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        int ret;

        p = &root->fs_info->defrag_inodes.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(defrag, entry);
                if (ret < 0)
                        p = &parent->rb_left;
                else if (ret > 0)
                        p = &parent->rb_right;
                else {
                        /* if we're reinserting an entry for
                         * an old defrag run, make sure to
                         * lower the transid of our existing record
                         */
                        if (defrag->transid < entry->transid)
                                entry->transid = defrag->transid;
                        if (defrag->last_offset > entry->last_offset)
                                entry->last_offset = defrag->last_offset;
                        goto exists;
                }
        }
        set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
        return;

exists:
        kfree(defrag);
        return;

}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
                           struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *defrag;
        u64 transid;

        if (!btrfs_test_opt(root, AUTO_DEFRAG))
                return 0;

        if (btrfs_fs_closing(root->fs_info))
                return 0;

        if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                return 0;

        if (trans)
                transid = trans->transid;
        else
                transid = BTRFS_I(inode)->root->last_trans;

        defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
        if (!defrag)
                return -ENOMEM;

        defrag->ino = btrfs_ino(inode);
        defrag->transid = transid;
        defrag->root = root->root_key.objectid;

        spin_lock(&root->fs_info->defrag_inodes_lock);
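        /*
         * recheck IN_DEFRAG under the lock: another task may have
         * queued this inode since our unlocked test above; if so,
         * drop the duplicate record
         */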
        if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                __btrfs_add_inode_defrag(inode, defrag);
        else
                kfree(defrag);
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        return 0;
}

/*
 * must be called with the defrag_inodes lock held
 *
 * returns the matching inode_defrag record, or NULL.  If there is no
 * exact match and 'next' is non-NULL, *next is set to the first
 * record that sorts after (root, ino)
 */
struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
                                             u64 root, u64 ino,
                                             struct rb_node **next)
{
        struct inode_defrag *entry = NULL;
        struct inode_defrag tmp;
        struct rb_node *p;
        struct rb_node *parent = NULL;
        int ret;

        tmp.ino = ino;
        tmp.root = root;

        p = info->defrag_inodes.rb_node;
        while (p) {
                parent = p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(&tmp, entry);
                if (ret < 0)
                        p = parent->rb_left;
                else if (ret > 0)
                        p = parent->rb_right;
                else
                        return entry;
        }

        if (next) {
                while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
                        parent = rb_next(parent);
                        entry = rb_entry(parent, struct inode_defrag, rb_node);
                }
                *next = parent;
        }
        return NULL;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        struct btrfs_root *inode_root;
        struct inode *inode;
        struct rb_node *n;
        struct btrfs_key key;
        struct btrfs_ioctl_defrag_range_args range;
        u64 first_ino = 0;
        u64 root_objectid = 0;
        int num_defrag;
        int defrag_batch = 1024;

        memset(&range, 0, sizeof(range));
        range.len = (u64)-1;
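        /* a len of (u64)-1 asks btrfs_defrag_file to go all the way to EOF */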

        atomic_inc(&fs_info->defrag_running);
        spin_lock(&fs_info->defrag_inodes_lock);
        while (1) {
                n = NULL;

                /* find an inode to defrag */
                defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
                                                 first_ino, &n);
                if (!defrag) {
                        if (n) {
                                defrag = rb_entry(n, struct inode_defrag,
                                                  rb_node);
                        } else if (root_objectid || first_ino) {
                                root_objectid = 0;
                                first_ino = 0;
                                continue;
                        } else {
                                break;
                        }
                }

                /* remove it from the rbtree */
                first_ino = defrag->ino + 1;
                root_objectid = defrag->root;
                rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);

                if (btrfs_fs_closing(fs_info))
                        goto next_free;

                spin_unlock(&fs_info->defrag_inodes_lock);

                /* get the inode */
                key.objectid = defrag->root;
                btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
                key.offset = (u64)-1;
                inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
                if (IS_ERR(inode_root))
                        goto next;

                key.objectid = defrag->ino;
                btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
                key.offset = 0;

                inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
                if (IS_ERR(inode))
                        goto next;

                /* do a chunk of defrag */
                clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
                range.start = defrag->last_offset;
                num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                               defrag_batch);
                /*
                 * if we filled the whole defrag batch, there
                 * must be more work to do.  Queue this defrag
                 * again
                 */
                if (num_defrag == defrag_batch) {
                        defrag->last_offset = range.start;
                        __btrfs_add_inode_defrag(inode, defrag);
                        /*
                         * we don't want to kfree defrag, we added it back to
                         * the rbtree
                         */
                        defrag = NULL;
                } else if (defrag->last_offset && !defrag->cycled) {
                        /*
                         * we didn't fill our defrag batch, but
                         * we didn't start at zero.  Make sure we loop
                         * around to the start of the file.
                         */
                        defrag->last_offset = 0;
                        defrag->cycled = 1;
                        __btrfs_add_inode_defrag(inode, defrag);
                        defrag = NULL;
                }

                iput(inode);
next:
                spin_lock(&fs_info->defrag_inodes_lock);
next_free:
                kfree(defrag);
        }
        spin_unlock(&fs_info->defrag_inodes_lock);

        atomic_dec(&fs_info->defrag_running);

        /*
         * during unmount, we use the transaction_wait queue to
         * wait for the defragger to stop
         */
        wake_up(&fs_info->transaction_wait);
        return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                                         size_t write_bytes,
                                         struct page **prepared_pages,
                                         struct iov_iter *i)
{
        size_t copied = 0;
        size_t total_copied = 0;
        int pg = 0;
        int offset = pos & (PAGE_CACHE_SIZE - 1);
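        /*
         * e.g. with 4K pages, pos = 5000 gives offset = 904: the copy
         * starts 904 bytes into prepared_pages[0]
         */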

        while (write_bytes > 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                 *
                 * Disable pagefault to avoid recursive lock since
                 * the pages are already locked
                 */
                pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
                pagefault_enable();

                /* Flush processor's dcache for this page */
                flush_dcache_page(page);

                /*
                 * if we get a partial write, we can end up with
                 * partially up to date pages.  These add
                 * a lot of complexity, so make sure they don't
                 * happen by forcing this copy to be retried.
                 *
                 * The rest of the btrfs_file_write code will fall
                 * back to page at a time copies after we return 0.
                 */
                if (!PageUptodate(page) && copied < count)
                        copied = 0;

                iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;

                /* Return to btrfs_file_aio_write to fault page */
                if (unlikely(copied == 0))
                        break;

                if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
                        offset += copied;
                } else {
                        pg++;
                        offset = 0;
                }
        }
        return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                /* PageChecked is some magic around finding pages that
                 * have been modified without going through
                 * btrfs_set_page_dirty.  Clear it here.
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                mark_page_accessed(pages[i]);
                page_cache_release(pages[i]);
        }
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
                      struct page **pages, size_t num_pages,
                      loff_t pos, size_t write_bytes,
                      struct extent_state **cached)
{
        int err = 0;
        int i;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

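        /*
         * round the written range out to sector boundaries.  Example,
         * assuming a 4K sectorsize: pos = 5000 and write_bytes = 3000
         * give start_pos = 4096 and num_bytes = 4096, so delalloc is
         * set on [4096, 8191]
         */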
        start_pos = pos & ~((u64)root->sectorsize - 1);
        num_bytes = (write_bytes + pos - start_pos +
                    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        cached);
        if (err)
                return err;

        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }

        /*
         * we've only changed i_size in ram, and we haven't updated
         * the disk i_size.  There is no need to log the inode
         * at this time.
         */
        if (end_pos > isize)
                i_size_write(inode, end_pos);
        return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                             int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 len = end - start + 1;
        u64 gen;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
        while (1) {
                int no_splits = 0;

                if (!split)
                        split = alloc_extent_map();
                if (!split2)
                        split2 = alloc_extent_map();
                if (!split || !split2)
                        no_splits = 1;

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                gen = em->generation;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                remove_extent_mapping(em_tree, em);
                if (no_splits)
                        goto next;

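                /*
                 * the mapping straddles the start of the dropped
                 * range: keep the front piece [em->start, start) as
                 * a new mapping
                 */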
                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;
                        split->orig_start = em->orig_start;
                        split->block_start = em->block_start;

                        if (compressed)
                                split->block_len = em->block_len;
                        else
                                split->block_len = split->len;
                        split->generation = gen;
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret); /* Logic error */
                        list_move(&split->list, &em_tree->modified_extents);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
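                /*
                 * the mapping extends past the end of the dropped
                 * range: keep the tail piece starting at start + len
                 * as well
                 */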
                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        split->generation = gen;

                        if (compressed) {
                                split->block_len = em->block_len;
                                split->block_start = em->block_start;
                                split->orig_start = em->orig_start;
                        } else {
                                split->block_len = split->len;
                                split->block_start = em->block_start + diff;
                                split->orig_start = split->start;
                        }

                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret); /* Logic error */
                        list_move(&split->list, &em_tree->modified_extents);
                        free_extent_map(split);
                        split = NULL;
                }
next:
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  If drop_end is not NULL, *drop_end is
 * set to the end of the range that was actually processed.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root, struct inode *inode,
                         struct btrfs_path *path, u64 start, u64 end,
                         u64 *drop_end, int drop_cache)
{
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 ino = btrfs_ino(inode);
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;
        int modify_tree = -1;
        int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
        int found = 0;

        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

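        /*
         * modify_tree is passed through to btrfs_lookup_file_extent()
         * as its 'mod' argument: nonzero asks for a COW-ed, writable
         * search.  Past disk_i_size, presumably, no extent needs
         * splitting, so a plain read-only search (0) is enough.
         */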
        if (start >= BTRFS_I(inode)->disk_i_size)
                modify_tree = 0;

        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               search_start, modify_tree);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid > ino ||
                    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_inline_len(leaf, fi);
                } else {
                        WARN_ON(1);
                        extent_end = search_start;
                }

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                found = 1;
                search_start = max(key.offset, start);
                if (recow || !modify_tree) {
                        modify_tree = -1;
                        btrfs_release_path(path);
                        continue;
                }

                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                start - extent_offset, 0);
                                BUG_ON(ret); /* -ENOMEM */
                        }
                        key.offset = start;
                }
                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (start <= key.offset && end < extent_end) {
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, end - key.offset);
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, extent_end - start);
                        if (end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (start <= key.offset && end >= extent_end) {
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (update_refs &&
                            extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                extent_end = ALIGN(extent_end,
                                                   root->sectorsize);
                        } else if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_free_extent(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
                                                extent_offset, 0);
                                BUG_ON(ret); /* -ENOMEM */
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                        }

                        if (end == extent_end)
                                break;

                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        if (ret) {
                                btrfs_abort_transaction(trans, root, ret);
                                break;
                        }

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(path);
                        continue;
                }

                BUG_ON(1);
        }

        if (!ret && del_nr > 0) {
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret)
                        btrfs_abort_transaction(trans, root, ret);
        }

        if (drop_end)
                *drop_end = found ? min(end, extent_end) : end;
        btrfs_release_path(path);
        return ret;
}

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode, u64 start,
                       u64 end, int drop_cache)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
                                   drop_cache);
        btrfs_free_path(path);
        return ret;
}

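/*
 * helper for btrfs_mark_extent_written(): returns 1 if the item in
 * 'slot' is a plain, uncompressed regular extent backed by 'bytenr'
 * that the caller's extent could merge with.  *start and *end act as
 * optional in/out constraints and are filled with the neighbour's
 * range on success.
 */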
static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}

/*
 * Mark the extent in the range start - end as written.
 *
 * This changes the extent type from 'pre-allocated' to 'regular'. If
 * only part of the extent is marked as written, the extent will be
 * split into two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        recow = 0;
        split = start;
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
               BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

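        /*
         * the written range starts right at the prealloc extent: try
         * to fold it into a mergeable extent on the left instead of
         * splitting
         */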
        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

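        /*
         * the written range ends right at the end of the prealloc
         * extent: try to fold it into a mergeable extent on the right
         */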
        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

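        /*
         * no merge was possible: split the prealloc extent at 'start'
         * and/or 'end' so the written piece becomes its own item.
         * Each btrfs_duplicate_item() adds one split, so the loop
         * runs at most twice.
         */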
        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(path);
                        goto again;
                }
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
                                           ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */

                if (split == start) {
                        key.offset = start;
                } else {
                        BUG_ON(start != key.offset);
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }
        }
out:
        btrfs_free_path(path);
        return 0;
}

/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
                                 bool force_uptodate)
{
        int ret = 0;

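        /*
         * a write that starts or ends mid-page (or a forced retry)
         * must read the page in first so the bytes we do not
         * overwrite stay valid
         */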
        if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
            !PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                if (ret)
                        return ret;
                lock_page(page);
                if (!PageUptodate(page)) {
                        unlock_page(page);
                        return -EIO;
                }
        }
        return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
                         struct page **pages, size_t num_pages,
                         loff_t pos, unsigned long first_index,
                         size_t write_bytes, bool force_uptodate)
{
        struct extent_state *cached_state = NULL;
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = fdentry(file)->d_inode;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int err = 0;
        int faili = 0;
        u64 start_pos;
        u64 last_pos;

        start_pos = pos & ~((u64)root->sectorsize - 1);
        last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = find_or_create_page(inode->i_mapping, index + i,
                                               mask | __GFP_WRITE);
                if (!pages[i]) {
                        faili = i - 1;
                        err = -ENOMEM;
                        goto fail;
                }

                if (i == 0)
                        err = prepare_uptodate_page(pages[i], pos,
                                                    force_uptodate);
                if (i == num_pages - 1)
                        err = prepare_uptodate_page(pages[i],
                                                    pos + write_bytes, false);
                if (err) {
                        page_cache_release(pages[i]);
                        faili = i - 1;
                        goto fail;
                }
                wait_on_page_writeback(pages[i]);
        }
        err = 0;
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
                lock_extent_bits(&BTRFS_I(inode)->io_tree,
                                 start_pos, last_pos - 1, 0, &cached_state);
                ordered = btrfs_lookup_first_ordered_extent(inode,
                                                            last_pos - 1);
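                /*
                 * an ordered extent in the range means writeback is
                 * still in flight there: drop everything, wait for it
                 * to finish and start over
                 */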
                if (ordered &&
                    ordered->file_offset + ordered->len > start_pos &&
                    ordered->file_offset < last_pos) {
                        btrfs_put_ordered_extent(ordered);
                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                             start_pos, last_pos - 1,
                                             &cached_state, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
                                page_cache_release(pages[i]);
                        }
                        btrfs_wait_ordered_range(inode, start_pos,
                                                 last_pos - start_pos);
                        goto again;
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);

                clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
                                  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
                                  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                                  0, 0, &cached_state, GFP_NOFS);
                unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                     start_pos, last_pos - 1, &cached_state,
                                     GFP_NOFS);
        }
        for (i = 0; i < num_pages; i++) {
                if (clear_page_dirty_for_io(pages[i]))
                        account_page_redirty(pages[i]);
                set_page_extent_mapped(pages[i]);
                WARN_ON(!PageLocked(pages[i]));
        }
        return 0;
fail:
        while (faili >= 0) {
                unlock_page(pages[faili]);
                page_cache_release(pages[faili]);
                faili--;
        }
        return err;

}

static noinline ssize_t __btrfs_buffered_write(struct file *file,
                                               struct iov_iter *i,
                                               loff_t pos)
{
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct page **pages = NULL;
        unsigned long first_index;
        size_t num_written = 0;
        int nrptrs;
        int ret = 0;
        bool force_page_uptodate = false;

        nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
                     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
                     (sizeof(struct page *)));
        nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
        nrptrs = max(nrptrs, 8);
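        /*
         * nrptrs bounds how many pages each pass touches: enough for
         * the whole iov if it is small, never more page pointers than
         * fit in one page, capped by the task's dirty-page quota, but
         * at least 8
         */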
        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        first_index = pos >> PAGE_CACHE_SHIFT;

        while (iov_iter_count(i) > 0) {
                size_t offset = pos & (PAGE_CACHE_SIZE - 1);
                size_t write_bytes = min(iov_iter_count(i),
                                         nrptrs * (size_t)PAGE_CACHE_SIZE -
                                         offset);
                size_t num_pages = (write_bytes + offset +
                                    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
                size_t dirty_pages;
                size_t copied;

                WARN_ON(num_pages > nrptrs);

                /*
                 * Fault pages before locking them in prepare_pages
                 * to avoid recursive lock
                 */
                if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
                        ret = -EFAULT;
                        break;
                }

                ret = btrfs_delalloc_reserve_space(inode,
                                        num_pages << PAGE_CACHE_SHIFT);
                if (ret)
                        break;

                /*
                 * This is going to setup the pages array with the number of
                 * pages we want, so we don't really need to worry about the
                 * contents of pages from loop to loop
                 */
                ret = prepare_pages(root, file, pages, num_pages,
                                    pos, first_index, write_bytes,
                                    force_page_uptodate);
                if (ret) {
                        btrfs_delalloc_release_space(inode,
                                        num_pages << PAGE_CACHE_SHIFT);
                        break;
                }

                copied = btrfs_copy_from_user(pos, num_pages,
                                           write_bytes, pages, i);

                /*
                 * if we have trouble faulting in the pages, fall
                 * back to one page at a time
                 */
                if (copied < write_bytes)
                        nrptrs = 1;

                if (copied == 0) {
                        force_page_uptodate = true;
                        dirty_pages = 0;
                } else {
                        force_page_uptodate = false;
                        dirty_pages = (copied + offset +
                                       PAGE_CACHE_SIZE - 1) >>
                                       PAGE_CACHE_SHIFT;
                }

                /*
                 * If we had a short copy we need to release the excess
                 * delalloc bytes we reserved.  We need to increment
                 * outstanding_extents because btrfs_delalloc_release_space
                 * will decrement it, but we still have an outstanding
                 * extent for the chunk we actually managed to copy.
                 */
1322                 if (num_pages > dirty_pages) {
1323                         if (copied > 0) {
1324                                 spin_lock(&BTRFS_I(inode)->lock);
1325                                 BTRFS_I(inode)->outstanding_extents++;
1326                                 spin_unlock(&BTRFS_I(inode)->lock);
1327                         }
1328                         btrfs_delalloc_release_space(inode,
1329                                         (num_pages - dirty_pages) <<
1330                                         PAGE_CACHE_SHIFT);
1331                 }
1332
1333                 if (copied > 0) {
1334                         ret = btrfs_dirty_pages(root, inode, pages,
1335                                                 dirty_pages, pos, copied,
1336                                                 NULL);
1337                         if (ret) {
1338                                 btrfs_delalloc_release_space(inode,
1339                                         dirty_pages << PAGE_CACHE_SHIFT);
1340                                 btrfs_drop_pages(pages, num_pages);
1341                                 break;
1342                         }
1343                 }
1344
1345                 btrfs_drop_pages(pages, num_pages);
1346
1347                 cond_resched();
1348
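                     /*
                      * Throttle against the dirty page limits; for a small
                      * write (under one btree leaf worth of pages) also give
                      * the btree a chance to write back dirty metadata.
                      */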
1349                 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
1350                                                    dirty_pages);
1351                 if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
1352                         btrfs_btree_balance_dirty(root, 1);
1353
1354                 pos += copied;
1355                 num_written += copied;
1356         }
1357
1358         kfree(pages);
1359
1360         return num_written ? num_written : ret;
1361 }
1362
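     /*
      * O_DIRECT writes can complete only partially (for example when part of
      * the range can't be written directly).  When that happens, fall back to
      * a buffered write for the remainder, then write out and invalidate that
      * part of the page cache so the result on disk matches a direct write.
      */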
1363 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1364                                     const struct iovec *iov,
1365                                     unsigned long nr_segs, loff_t pos,
1366                                     loff_t *ppos, size_t count, size_t ocount)
1367 {
1368         struct file *file = iocb->ki_filp;
1369         struct iov_iter i;
1370         ssize_t written;
1371         ssize_t written_buffered;
1372         loff_t endbyte;
1373         int err;
1374
1375         written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
1376                                             count, ocount);
1377
1378         if (written < 0 || written == count)
1379                 return written;
1380
1381         pos += written;
1382         count -= written;
1383         iov_iter_init(&i, iov, nr_segs, count, written);
1384         written_buffered = __btrfs_buffered_write(file, &i, pos);
1385         if (written_buffered < 0) {
1386                 err = written_buffered;
1387                 goto out;
1388         }
1389         endbyte = pos + written_buffered - 1;
1390         err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
1391         if (err)
1392                 goto out;
1393         written += written_buffered;
1394         *ppos = pos + written_buffered;
1395         invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
1396                                  endbyte >> PAGE_CACHE_SHIFT);
1397 out:
1398         return written ? written : err;
1399 }
1400
1401 static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1402                                     const struct iovec *iov,
1403                                     unsigned long nr_segs, loff_t pos)
1404 {
1405         struct file *file = iocb->ki_filp;
1406         struct inode *inode = fdentry(file)->d_inode;
1407         struct btrfs_root *root = BTRFS_I(inode)->root;
1408         loff_t *ppos = &iocb->ki_pos;
1409         u64 start_pos;
1410         ssize_t num_written = 0;
1411         ssize_t err = 0;
1412         size_t count, ocount;
1413
1414         sb_start_write(inode->i_sb);
1415
1416         mutex_lock(&inode->i_mutex);
1417
1418         err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
1419         if (err) {
1420                 mutex_unlock(&inode->i_mutex);
1421                 goto out;
1422         }
1423         count = ocount;
1424
1425         current->backing_dev_info = inode->i_mapping->backing_dev_info;
1426         err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1427         if (err) {
1428                 mutex_unlock(&inode->i_mutex);
1429                 goto out;
1430         }
1431
1432         if (count == 0) {
1433                 mutex_unlock(&inode->i_mutex);
1434                 goto out;
1435         }
1436
1437         err = file_remove_suid(file);
1438         if (err) {
1439                 mutex_unlock(&inode->i_mutex);
1440                 goto out;
1441         }
1442
1443         /*
1444          * If BTRFS flips readonly due to an unexpected error
1445          * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1446          * we have to stop this write to keep the FS consistent,
1447          * even though we opened the file as writable.
1448          */
1449         if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
1450                 mutex_unlock(&inode->i_mutex);
1451                 err = -EROFS;
1452                 goto out;
1453         }
1454
1455         err = file_update_time(file);
1456         if (err) {
1457                 mutex_unlock(&inode->i_mutex);
1458                 goto out;
1459         }
1460
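             /*
              * If the write starts beyond EOF, expand the file first so the
              * range between the old i_size and the start of the write is a
              * proper hole instead of stale data.
              */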
1461         start_pos = round_down(pos, root->sectorsize);
1462         if (start_pos > i_size_read(inode)) {
1463                 err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
1464                 if (err) {
1465                         mutex_unlock(&inode->i_mutex);
1466                         goto out;
1467                 }
1468         }
1469
1470         if (unlikely(file->f_flags & O_DIRECT)) {
1471                 num_written = __btrfs_direct_write(iocb, iov, nr_segs,
1472                                                    pos, ppos, count, ocount);
1473         } else {
1474                 struct iov_iter i;
1475
1476                 iov_iter_init(&i, iov, nr_segs, count, num_written);
1477
1478                 num_written = __btrfs_buffered_write(file, &i, pos);
1479                 if (num_written > 0)
1480                         *ppos = pos + num_written;
1481         }
1482
1483         mutex_unlock(&inode->i_mutex);
1484
1485         /*
1486          * We want to make sure fsync finds this change, but we
1487          * haven't joined any transaction that is running right now.
1488          *
1489          * Later on, someone is sure to update the inode and get the
1490          * real transid recorded.
1491          *
1492          * We set last_trans now to the fs_info generation + 1,
1493          * this will either be one more than the running transaction
1494          * or the generation used for the next transaction if there isn't
1495          * one running right now.
1496          */
1497         BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
1498         if (num_written > 0 || num_written == -EIOCBQUEUED) {
1499                 err = generic_write_sync(file, pos, num_written);
1500                 if (err < 0 && num_written > 0)
1501                         num_written = err;
1502         }
1503 out:
1504         sb_end_write(inode->i_sb);
1505         current->backing_dev_info = NULL;
1506         return num_written ? num_written : err;
1507 }
1508
1509 int btrfs_release_file(struct inode *inode, struct file *filp)
1510 {
1511         /*
1512          * ordered_data_close is set by setattr when we are about to truncate
1513          * a file from a non-zero size to a zero size.  This flushes out
1514          * new bytes that may have been written if the application
1515          * was using truncate to replace a file in place.
1516          */
1517         if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1518                                &BTRFS_I(inode)->runtime_flags)) {
1519                 btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
1520                 if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
1521                         filemap_flush(inode->i_mapping);
1522         }
1523         if (filp->private_data)
1524                 btrfs_ioctl_trans_end(filp);
1525         return 0;
1526 }
1527
1528 /*
1529  * fsync call for both files and directories.  This logs the inode into
1530  * the tree log instead of forcing full commits whenever possible.
1531  *
1532  * It needs to call filemap_fdatawait so that all ordered extent updates
1533  * in the metadata btree are up to date for copying to the log.
1534  *
1535  * It drops the inode mutex before doing the tree log commit.  This is an
1536  * important optimization for directories because holding the mutex prevents
1537  * new operations on the dir while we write to disk.
1538  */
1539 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1540 {
1541         struct dentry *dentry = file->f_path.dentry;
1542         struct inode *inode = dentry->d_inode;
1543         struct btrfs_root *root = BTRFS_I(inode)->root;
1544         int ret = 0;
1545         struct btrfs_trans_handle *trans;
1546
1547         trace_btrfs_sync_file(file, datasync);
1548
1549         /*
1550          * We write the dirty pages in the range and wait for them to
1551          * complete outside of the ->i_mutex, so that multiple tasks can
1552          * flush dirty pages concurrently and improve performance.
1553          */
1554         ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
1555         if (ret)
1556                 return ret;
1557
1558         mutex_lock(&inode->i_mutex);
1559
1560         /*
1561          * Flush the dirty pages once more so that no dirty pages in the
1562          * range are left behind.
1563          */
1564         atomic_inc(&root->log_batch);
1565         btrfs_wait_ordered_range(inode, start, end);
1566         atomic_inc(&root->log_batch);
1567
1568         /*
1569          * check the transaction that last modified this inode
1570          * and see if it's already been committed
1571          */
1572         if (!BTRFS_I(inode)->last_trans) {
1573                 mutex_unlock(&inode->i_mutex);
1574                 goto out;
1575         }
1576
1577         /*
1578          * if the last transaction that changed this file was before
1579          * the current transaction, we can bail out now without any
1580          * syncing
1581          */
1582         smp_mb();
1583         if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
1584             BTRFS_I(inode)->last_trans <=
1585             root->fs_info->last_trans_committed) {
1586                 BTRFS_I(inode)->last_trans = 0;
1587
1588                 /*
1589                  * We've had everything committed since the last time we were
1590                  * modified so clear this flag in case it was set for whatever
1591                  * reason, it's no longer relevant.
1592                  */
1593                 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1594                           &BTRFS_I(inode)->runtime_flags);
1595                 mutex_unlock(&inode->i_mutex);
1596                 goto out;
1597         }
1598
1599         /*
1600          * ok we haven't committed the transaction yet, let's do a commit
1601          */
1602         if (file->private_data)
1603                 btrfs_ioctl_trans_end(file);
1604
1605         trans = btrfs_start_transaction(root, 0);
1606         if (IS_ERR(trans)) {
1607                 ret = PTR_ERR(trans);
1608                 mutex_unlock(&inode->i_mutex);
1609                 goto out;
1610         }
1611
1612         ret = btrfs_log_dentry_safe(trans, root, dentry);
1613         if (ret < 0) {
1614                 mutex_unlock(&inode->i_mutex);
1615                 goto out;
1616         }
1617
1618         /* we've logged all the items and now have a consistent
1619          * version of the file in the log.  It is possible that
1620          * someone will come in and modify the file, but that's
1621          * fine because the log is consistent on disk, and we
1622          * have references to all of the file's extents
1623          *
1624          * It is possible that someone will come in and log the
1625          * file again, but that will end up using the synchronization
1626          * inside btrfs_sync_log to keep things safe.
1627          */
1628         mutex_unlock(&inode->i_mutex);
1629
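             /*
              * ret > 0 from btrfs_log_dentry_safe means the inode couldn't be
              * logged and a full transaction commit is required; 0 means the
              * tree log holds the change and btrfs_sync_log writes it out.
              */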
1630         if (ret != BTRFS_NO_LOG_SYNC) {
1631                 if (ret > 0) {
1632                         ret = btrfs_commit_transaction(trans, root);
1633                 } else {
1634                         ret = btrfs_sync_log(trans, root);
1635                         if (ret == 0)
1636                                 ret = btrfs_end_transaction(trans, root);
1637                         else
1638                                 ret = btrfs_commit_transaction(trans, root);
1639                 }
1640         } else {
1641                 ret = btrfs_end_transaction(trans, root);
1642         }
1643 out:
1644         return ret > 0 ? -EIO : ret;
1645 }
1646
1647 static const struct vm_operations_struct btrfs_file_vm_ops = {
1648         .fault          = filemap_fault,
1649         .page_mkwrite   = btrfs_page_mkwrite,
1650         .remap_pages    = generic_file_remap_pages,
1651 };
1652
1653 static int btrfs_file_mmap(struct file  *filp, struct vm_area_struct *vma)
1654 {
1655         struct address_space *mapping = filp->f_mapping;
1656
1657         if (!mapping->a_ops->readpage)
1658                 return -ENOEXEC;
1659
1660         file_accessed(filp);
1661         vma->vm_ops = &btrfs_file_vm_ops;
1662
1663         return 0;
1664 }
1665
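     /*
      * Check whether the extent item in the given leaf slot is a hole (a
      * regular file extent with disk_bytenr == 0) directly adjacent to
      * [start, end), i.e. one a new hole for that range could merge with.
      */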
1666 static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
1667                           int slot, u64 start, u64 end)
1668 {
1669         struct btrfs_file_extent_item *fi;
1670         struct btrfs_key key;
1671
1672         if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1673                 return 0;
1674
1675         btrfs_item_key_to_cpu(leaf, &key, slot);
1676         if (key.objectid != btrfs_ino(inode) ||
1677             key.type != BTRFS_EXTENT_DATA_KEY)
1678                 return 0;
1679
1680         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1681
1682         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
1683                 return 0;
1684
1685         if (btrfs_file_extent_disk_bytenr(leaf, fi))
1686                 return 0;
1687
1688         if (key.offset == end)
1689                 return 1;
1690         if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
1691                 return 1;
1692         return 0;
1693 }
1694
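     /*
      * Fill the range [offset, end) with a hole extent: extend a mergeable
      * neighbouring hole extent when possible, otherwise insert a fresh hole
      * file extent item, then bring the extent map cache in line.
      */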
1695 static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
1696                       struct btrfs_path *path, u64 offset, u64 end)
1697 {
1698         struct btrfs_root *root = BTRFS_I(inode)->root;
1699         struct extent_buffer *leaf;
1700         struct btrfs_file_extent_item *fi;
1701         struct extent_map *hole_em;
1702         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1703         struct btrfs_key key;
1704         int ret;
1705
1706         key.objectid = btrfs_ino(inode);
1707         key.type = BTRFS_EXTENT_DATA_KEY;
1708         key.offset = offset;
1709
1710
1711         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1712         if (ret < 0)
1713                 return ret;
1714         BUG_ON(!ret);
1715
1716         leaf = path->nodes[0];
1717         if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
1718                 u64 num_bytes;
1719
1720                 path->slots[0]--;
1721                 fi = btrfs_item_ptr(leaf, path->slots[0],
1722                                     struct btrfs_file_extent_item);
1723                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
1724                         end - offset;
1725                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1726                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1727                 btrfs_set_file_extent_offset(leaf, fi, 0);
1728                 btrfs_mark_buffer_dirty(leaf);
1729                 goto out;
1730         }
1731
1732         if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
1733                 u64 num_bytes;
1734
1735                 path->slots[0]++;
1736                 key.offset = offset;
1737                 btrfs_set_item_key_safe(trans, root, path, &key);
1738                 fi = btrfs_item_ptr(leaf, path->slots[0],
1739                                     struct btrfs_file_extent_item);
1740                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
1741                         offset;
1742                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1743                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1744                 btrfs_set_file_extent_offset(leaf, fi, 0);
1745                 btrfs_mark_buffer_dirty(leaf);
1746                 goto out;
1747         }
1748         btrfs_release_path(path);
1749
1750         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
1751                                        0, 0, end - offset, 0, end - offset,
1752                                        0, 0, 0);
1753         if (ret)
1754                 return ret;
1755
1756 out:
1757         btrfs_release_path(path);
1758
1759         hole_em = alloc_extent_map();
1760         if (!hole_em) {
1761                 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1762                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1763                         &BTRFS_I(inode)->runtime_flags);
1764         } else {
1765                 hole_em->start = offset;
1766                 hole_em->len = end - offset;
1767                 hole_em->orig_start = offset;
1768
1769                 hole_em->block_start = EXTENT_MAP_HOLE;
1770                 hole_em->block_len = 0;
1771                 hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
1772                 hole_em->compress_type = BTRFS_COMPRESS_NONE;
1773                 hole_em->generation = trans->transid;
1774
1775                 do {
1776                         btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1777                         write_lock(&em_tree->lock);
1778                         ret = add_extent_mapping(em_tree, hole_em);
1779                         if (!ret)
1780                                 list_move(&hole_em->list,
1781                                           &em_tree->modified_extents);
1782                         write_unlock(&em_tree->lock);
1783                 } while (ret == -EEXIST);
1784                 free_extent_map(hole_em);
1785                 if (ret)
1786                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1787                                 &BTRFS_I(inode)->runtime_flags);
1788         }
1789
1790         return 0;
1791 }
1792
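     /*
      * Punch a hole over [offset, offset + len): zero any partial pages at
      * the edges, then drop the file extents covering the sector-aligned
      * middle of the range and replace them with hole extents, restarting
      * the transaction between passes to keep reservations small.
      */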
1793 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
1794 {
1795         struct btrfs_root *root = BTRFS_I(inode)->root;
1796         struct extent_state *cached_state = NULL;
1797         struct btrfs_path *path;
1798         struct btrfs_block_rsv *rsv;
1799         struct btrfs_trans_handle *trans;
1800         u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
1801         u64 lockstart = (offset + mask) & ~mask;
1802         u64 lockend = ((offset + len) & ~mask) - 1;
1803         u64 cur_offset = lockstart;
1804         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
1805         u64 drop_end;
1806         unsigned long nr;
1807         int ret = 0;
1808         int err = 0;
1809         bool same_page = (offset >> PAGE_CACHE_SHIFT) ==
1810                 ((offset + len) >> PAGE_CACHE_SHIFT);
1811
1812         btrfs_wait_ordered_range(inode, offset, len);
1813
1814         mutex_lock(&inode->i_mutex);
1815         if (offset >= inode->i_size) {
1816                 mutex_unlock(&inode->i_mutex);
1817                 return 0;
1818         }
1819
1820         /*
1821          * Only take this single-page shortcut if the range sits within
1822          * one page and doesn't cover that entire page.
1823          */
1824         if (same_page && len < PAGE_CACHE_SIZE) {
1825                 ret = btrfs_truncate_page(inode, offset, len, 0);
1826                 mutex_unlock(&inode->i_mutex);
1827                 return ret;
1828         }
1829
1830         /* zero the back part of the first page */
1831         ret = btrfs_truncate_page(inode, offset, 0, 0);
1832         if (ret) {
1833                 mutex_unlock(&inode->i_mutex);
1834                 return ret;
1835         }
1836
1837         /* zero the front end of the last page */
1838         ret = btrfs_truncate_page(inode, offset + len, 0, 1);
1839         if (ret) {
1840                 mutex_unlock(&inode->i_mutex);
1841                 return ret;
1842         }
1843
1844         if (lockend < lockstart) {
1845                 mutex_unlock(&inode->i_mutex);
1846                 return 0;
1847         }
1848
1849         while (1) {
1850                 struct btrfs_ordered_extent *ordered;
1851
1852                 truncate_pagecache_range(inode, lockstart, lockend);
1853
1854                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1855                                  0, &cached_state);
1856                 ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
1857
1858                 /*
1859                  * We need to make sure we have no ordered extents in this range
1860                  * and that nobody raced in and read a page in this range;
1861                  * if either happened, we need to try again.
1862                  */
1863                 if ((!ordered ||
1864                     (ordered->file_offset + ordered->len < lockstart ||
1865                      ordered->file_offset > lockend)) &&
1866                      !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
1867                                      lockend, EXTENT_UPTODATE, 0,
1868                                      cached_state)) {
1869                         if (ordered)
1870                                 btrfs_put_ordered_extent(ordered);
1871                         break;
1872                 }
1873                 if (ordered)
1874                         btrfs_put_ordered_extent(ordered);
1875                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
1876                                      lockend, &cached_state, GFP_NOFS);
1877                 btrfs_wait_ordered_range(inode, lockstart,
1878                                          lockend - lockstart + 1);
1879         }
1880
1881         path = btrfs_alloc_path();
1882         if (!path) {
1883                 ret = -ENOMEM;
1884                 goto out;
1885         }
1886
1887         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
1888         if (!rsv) {
1889                 ret = -ENOMEM;
1890                 goto out_free;
1891         }
1892         rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
1893         rsv->failfast = 1;
1894
1895         /*
1896          * 1 - updating the inode
1897          * 1 - removing the extents in the range
1898          * 1 - adding the hole extent
1899          */
1900         trans = btrfs_start_transaction(root, 3);
1901         if (IS_ERR(trans)) {
1902                 err = PTR_ERR(trans);
1903                 goto out_free;
1904         }
1905
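             /*
              * Move min_size bytes from the transaction reservation into our
              * temporary rsv and make the transaction use it, so every pass
              * of the drop loop below runs with a known reservation.
              */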
1906         ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
1907                                       min_size);
1908         BUG_ON(ret);
1909         trans->block_rsv = rsv;
1910
1911         while (cur_offset < lockend) {
1912                 ret = __btrfs_drop_extents(trans, root, inode, path,
1913                                            cur_offset, lockend + 1,
1914                                            &drop_end, 1);
1915                 if (ret != -ENOSPC)
1916                         break;
1917
1918                 trans->block_rsv = &root->fs_info->trans_block_rsv;
1919
1920                 ret = fill_holes(trans, inode, path, cur_offset, drop_end);
1921                 if (ret) {
1922                         err = ret;
1923                         break;
1924                 }
1925
1926                 cur_offset = drop_end;
1927
1928                 ret = btrfs_update_inode(trans, root, inode);
1929                 if (ret) {
1930                         err = ret;
1931                         break;
1932                 }
1933
1934                 nr = trans->blocks_used;
1935                 btrfs_end_transaction(trans, root);
1936                 btrfs_btree_balance_dirty(root, nr);
1937
1938                 trans = btrfs_start_transaction(root, 3);
1939                 if (IS_ERR(trans)) {
1940                         ret = PTR_ERR(trans);
1941                         trans = NULL;
1942                         break;
1943                 }
1944
1945                 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
1946                                               rsv, min_size);
1947                 BUG_ON(ret);    /* shouldn't happen */
1948                 trans->block_rsv = rsv;
1949         }
1950
1951         if (ret) {
1952                 err = ret;
1953                 goto out_trans;
1954         }
1955
1956         trans->block_rsv = &root->fs_info->trans_block_rsv;
1957         ret = fill_holes(trans, inode, path, cur_offset, drop_end);
1958         if (ret) {
1959                 err = ret;
1960                 goto out_trans;
1961         }
1962
1963 out_trans:
1964         if (!trans)
1965                 goto out_free;
1966
1967         trans->block_rsv = &root->fs_info->trans_block_rsv;
1968         ret = btrfs_update_inode(trans, root, inode);
1969         nr = trans->blocks_used;
1970         btrfs_end_transaction(trans, root);
1971         btrfs_btree_balance_dirty(root, nr);
1972 out_free:
1973         btrfs_free_path(path);
1974         btrfs_free_block_rsv(root, rsv);
1975 out:
1976         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1977                              &cached_state, GFP_NOFS);
1978         mutex_unlock(&inode->i_mutex);
1979         if (ret && !err)
1980                 err = ret;
1981         return err;
1982 }
1983
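     /*
      * Preallocate unwritten extents covering [offset, offset + len), or with
      * FALLOC_FL_PUNCH_HOLE deallocate that range instead.
      *
      * Illustrative userspace call reaching the punch-hole path (the VFS
      * requires FALLOC_FL_KEEP_SIZE together with FALLOC_FL_PUNCH_HOLE):
      *
      *         fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
      *                   offset, len);
      */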
1984 static long btrfs_fallocate(struct file *file, int mode,
1985                             loff_t offset, loff_t len)
1986 {
1987         struct inode *inode = file->f_path.dentry->d_inode;
1988         struct extent_state *cached_state = NULL;
1989         u64 cur_offset;
1990         u64 last_byte;
1991         u64 alloc_start;
1992         u64 alloc_end;
1993         u64 alloc_hint = 0;
1994         u64 locked_end;
1995         u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
1996         struct extent_map *em;
1997         int ret;
1998
1999         alloc_start = offset & ~mask;
2000         alloc_end =  (offset + len + mask) & ~mask;
2001
2002         /* Make sure we aren't being given some crap mode */
2003         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2004                 return -EOPNOTSUPP;
2005
2006         if (mode & FALLOC_FL_PUNCH_HOLE)
2007                 return btrfs_punch_hole(inode, offset, len);
2008
2009         /*
2010          * Make sure we have enough space before we do the
2011          * allocation.
2012          */
2013         ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start + 1);
2014         if (ret)
2015                 return ret;
2016
2017         /*
2018          * wait for ordered IO before we have any locks.  We'll loop again
2019          * below with the locks held.
2020          */
2021         btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
2022
2023         mutex_lock(&inode->i_mutex);
2024         ret = inode_newsize_ok(inode, alloc_end);
2025         if (ret)
2026                 goto out;
2027
2028         if (alloc_start > inode->i_size) {
2029                 ret = btrfs_cont_expand(inode, i_size_read(inode),
2030                                         alloc_start);
2031                 if (ret)
2032                         goto out;
2033         }
2034
2035         locked_end = alloc_end - 1;
2036         while (1) {
2037                 struct btrfs_ordered_extent *ordered;
2038
2039                 /* the extent lock is ordered inside the running
2040                  * transaction
2041                  */
2042                 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
2043                                  locked_end, 0, &cached_state);
2044                 ordered = btrfs_lookup_first_ordered_extent(inode,
2045                                                             alloc_end - 1);
2046                 if (ordered &&
2047                     ordered->file_offset + ordered->len > alloc_start &&
2048                     ordered->file_offset < alloc_end) {
2049                         btrfs_put_ordered_extent(ordered);
2050                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
2051                                              alloc_start, locked_end,
2052                                              &cached_state, GFP_NOFS);
2053                         /*
2054                          * we can't wait on the range with the transaction
2055                          * running or with the extent lock held
2056                          */
2057                         btrfs_wait_ordered_range(inode, alloc_start,
2058                                                  alloc_end - alloc_start);
2059                 } else {
2060                         if (ordered)
2061                                 btrfs_put_ordered_extent(ordered);
2062                         break;
2063                 }
2064         }
2065
2066         cur_offset = alloc_start;
2067         while (1) {
2068                 u64 actual_end;
2069
2070                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2071                                       alloc_end - cur_offset, 0);
2072                 if (IS_ERR_OR_NULL(em)) {
2073                         if (!em)
2074                                 ret = -ENOMEM;
2075                         else
2076                                 ret = PTR_ERR(em);
2077                         break;
2078                 }
2079                 last_byte = min(extent_map_end(em), alloc_end);
2080                 actual_end = min_t(u64, extent_map_end(em), offset + len);
2081                 last_byte = (last_byte + mask) & ~mask;
2082
2083                 if (em->block_start == EXTENT_MAP_HOLE ||
2084                     (cur_offset >= inode->i_size &&
2085                      !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
2086                         ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
2087                                                         last_byte - cur_offset,
2088                                                         1 << inode->i_blkbits,
2089                                                         offset + len,
2090                                                         &alloc_hint);
2091
2092                         if (ret < 0) {
2093                                 free_extent_map(em);
2094                                 break;
2095                         }
2096                 } else if (actual_end > inode->i_size &&
2097                            !(mode & FALLOC_FL_KEEP_SIZE)) {
2098                         /*
2099                          * We didn't need to allocate any more space, but we
2100                          * still extended the size of the file so we need to
2101                          * update i_size.
2102                          */
2103                         inode->i_ctime = CURRENT_TIME;
2104                         i_size_write(inode, actual_end);
2105                         btrfs_ordered_update_i_size(inode, actual_end, NULL);
2106                 }
2107                 free_extent_map(em);
2108
2109                 cur_offset = last_byte;
2110                 if (cur_offset >= alloc_end) {
2111                         ret = 0;
2112                         break;
2113                 }
2114         }
2115         unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2116                              &cached_state, GFP_NOFS);
2117 out:
2118         mutex_unlock(&inode->i_mutex);
2119         /* Let go of our reservation. */
2120         btrfs_free_reserved_data_space(inode, alloc_end - alloc_start + 1);
2121         return ret;
2122 }
2123
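     /*
      * Starting at *offset, walk the extent maps looking for the first hole
      * (SEEK_HOLE) or the first extent with data (SEEK_DATA), treating
      * pending delalloc as data, and return the result through *offset.
      */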
2124 static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
2125 {
2126         struct btrfs_root *root = BTRFS_I(inode)->root;
2127         struct extent_map *em;
2128         struct extent_state *cached_state = NULL;
2129         u64 lockstart = *offset;
2130         u64 lockend = i_size_read(inode);
2131         u64 start = *offset;
2132         u64 orig_start = *offset;
2133         u64 len = i_size_read(inode);
2134         u64 last_end = 0;
2135         int ret = 0;
2136
2137         lockend = max_t(u64, root->sectorsize, lockend);
2138         if (lockend <= lockstart)
2139                 lockend = lockstart + root->sectorsize;
2140
2141         len = lockend - lockstart + 1;
2142
2143         len = max_t(u64, len, root->sectorsize);
2144         if (inode->i_size == 0)
2145                 return -ENXIO;
2146
2147         lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
2148                          &cached_state);
2149
2150         /*
2151          * Delalloc is such a pain.  If we have a hole with pending
2152          * delalloc for a portion of it, we will get back a hole that
2153          * covers the entire range, since the delalloc hasn't actually
2154          * been written yet.  To handle this case we look for an extent
2155          * just before the position we want, in case there is outstanding
2156          * delalloc going on there.
2157          */
2158         if (origin == SEEK_HOLE && start != 0) {
2159                 if (start <= root->sectorsize)
2160                         em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
2161                                                      root->sectorsize, 0);
2162                 else
2163                         em = btrfs_get_extent_fiemap(inode, NULL, 0,
2164                                                      start - root->sectorsize,
2165                                                      root->sectorsize, 0);
2166                 if (IS_ERR(em)) {
2167                         ret = PTR_ERR(em);
2168                         goto out;
2169                 }
2170                 last_end = em->start + em->len;
2171                 if (em->block_start == EXTENT_MAP_DELALLOC)
2172                         last_end = min_t(u64, last_end, inode->i_size);
2173                 free_extent_map(em);
2174         }
2175
2176         while (1) {
2177                 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2178                 if (IS_ERR(em)) {
2179                         ret = PTR_ERR(em);
2180                         break;
2181                 }
2182
2183                 if (em->block_start == EXTENT_MAP_HOLE) {
2184                         if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2185                                 if (last_end <= orig_start) {
2186                                         free_extent_map(em);
2187                                         ret = -ENXIO;
2188                                         break;
2189                                 }
2190                         }
2191
2192                         if (origin == SEEK_HOLE) {
2193                                 *offset = start;
2194                                 free_extent_map(em);
2195                                 break;
2196                         }
2197                 } else {
2198                         if (origin == SEEK_DATA) {
2199                                 if (em->block_start == EXTENT_MAP_DELALLOC) {
2200                                         if (start >= inode->i_size) {
2201                                                 free_extent_map(em);
2202                                                 ret = -ENXIO;
2203                                                 break;
2204                                         }
2205                                 }
2206
2207                                 *offset = start;
2208                                 free_extent_map(em);
2209                                 break;
2210                         }
2211                 }
2212
2213                 start = em->start + em->len;
2214                 last_end = em->start + em->len;
2215
2216                 if (em->block_start == EXTENT_MAP_DELALLOC)
2217                         last_end = min_t(u64, last_end, inode->i_size);
2218
2219                 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2220                         free_extent_map(em);
2221                         ret = -ENXIO;
2222                         break;
2223                 }
2224                 free_extent_map(em);
2225                 cond_resched();
2226         }
2227         if (!ret)
2228                 *offset = min(*offset, inode->i_size);
2229 out:
2230         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2231                              &cached_state, GFP_NOFS);
2232         return ret;
2233 }
2234
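     /*
      * llseek: SEEK_END/SEEK_CUR go through the generic helper, while
      * SEEK_DATA/SEEK_HOLE are resolved with find_desired_extent() above.
      * Illustrative userspace use:
      *
      *         off_t data = lseek(fd, 0, SEEK_DATA);    returns first data offset
      *         off_t hole = lseek(fd, data, SEEK_HOLE); returns end of that data run
      */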
2235 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
2236 {
2237         struct inode *inode = file->f_mapping->host;
2238         int ret;
2239
2240         mutex_lock(&inode->i_mutex);
2241         switch (origin) {
2242         case SEEK_END:
2243         case SEEK_CUR:
2244                 offset = generic_file_llseek(file, offset, origin);
2245                 goto out;
2246         case SEEK_DATA:
2247         case SEEK_HOLE:
2248                 if (offset >= i_size_read(inode)) {
2249                         mutex_unlock(&inode->i_mutex);
2250                         return -ENXIO;
2251                 }
2252
2253                 ret = find_desired_extent(inode, &offset, origin);
2254                 if (ret) {
2255                         mutex_unlock(&inode->i_mutex);
2256                         return ret;
2257                 }
2258         }
2259
2260         if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
2261                 offset = -EINVAL;
2262                 goto out;
2263         }
2264         if (offset > inode->i_sb->s_maxbytes) {
2265                 offset = -EINVAL;
2266                 goto out;
2267         }
2268
2269         /* Special lock needed here? */
2270         if (offset != file->f_pos) {
2271                 file->f_pos = offset;
2272                 file->f_version = 0;
2273         }
2274 out:
2275         mutex_unlock(&inode->i_mutex);
2276         return offset;
2277 }
2278
2279 const struct file_operations btrfs_file_operations = {
2280         .llseek         = btrfs_file_llseek,
2281         .read           = do_sync_read,
2282         .write          = do_sync_write,
2283         .aio_read       = generic_file_aio_read,
2284         .splice_read    = generic_file_splice_read,
2285         .aio_write      = btrfs_file_aio_write,
2286         .mmap           = btrfs_file_mmap,
2287         .open           = generic_file_open,
2288         .release        = btrfs_release_file,
2289         .fsync          = btrfs_sync_file,
2290         .fallocate      = btrfs_fallocate,
2291         .unlocked_ioctl = btrfs_ioctl,
2292 #ifdef CONFIG_COMPAT
2293         .compat_ioctl   = btrfs_ioctl,
2294 #endif
2295 };