/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"

/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}
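
/*
 * Records sort by root first, then by inode number, so one rbtree can
 * hold defrag state for every subvolume: e.g. a key of (root 5, ino 0)
 * orders before every inode that belongs to root 5, which is what lets
 * the defrag runner below resume a sweep from any (root, ino) cursor.
 */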

/* insert a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static void __btrfs_add_inode_defrag(struct inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			goto exists;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return;

exists:
	kfree(defrag);
	return;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;

	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(root->fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		__btrfs_add_inode_defrag(inode, defrag);
	else
		kfree(defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}
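
/*
 * Note the IN_DEFRAG bit is tested twice: once without the lock as a
 * cheap early-out, then again under defrag_inodes_lock before the
 * actual insert.  Losing a race between the two tests is harmless,
 * since __btrfs_add_inode_defrag() frees any duplicate record it is
 * handed after merging its transid/last_offset into the existing one.
 */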

/*
 * must be called with the defrag_inodes lock held
 */
struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
					     u64 root, u64 ino,
					     struct rb_node **next)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	p = info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			return entry;
	}

	if (next) {
		while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
			parent = rb_next(parent);
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		}
		*next = parent;
	}
	return NULL;
}
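
/*
 * On a miss, *next (when the caller asks for it) points at the first
 * record that sorts after the (root, ino) search key, or is NULL when
 * the key is past the end of the tree.  That turns the exact-match
 * lookup into a cursor scan for btrfs_run_defrag_inodes() below.
 */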

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct rb_node *n;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	u64 first_ino = 0;
	u64 root_objectid = 0;
	int num_defrag;
	int defrag_batch = 1024;

	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;

	atomic_inc(&fs_info->defrag_running);
	spin_lock(&fs_info->defrag_inodes_lock);
	while (1) {
		n = NULL;

		/* find an inode to defrag */
		defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
						 first_ino, &n);
		if (!defrag) {
			if (n) {
				defrag = rb_entry(n, struct inode_defrag,
						  rb_node);
			} else if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		/* remove it from the rbtree */
		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;
		rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);

		if (btrfs_fs_closing(fs_info))
			goto next_free;

		spin_unlock(&fs_info->defrag_inodes_lock);

		/* get the inode */
		key.objectid = defrag->root;
		btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
		key.offset = (u64)-1;
		inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(inode_root))
			goto next;

		key.objectid = defrag->ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;

		inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
		if (IS_ERR(inode))
			goto next;

		/* do a chunk of defrag */
		clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
		range.start = defrag->last_offset;
		num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
					       defrag_batch);
		/*
		 * if we filled the whole defrag batch, there
		 * must be more work to do.  Queue this defrag
		 * again
		 */
		if (num_defrag == defrag_batch) {
			defrag->last_offset = range.start;
			__btrfs_add_inode_defrag(inode, defrag);
			/*
			 * we don't want to kfree defrag, we added it back to
			 * the rbtree
			 */
			defrag = NULL;
		} else if (defrag->last_offset && !defrag->cycled) {
			/*
			 * we didn't fill our defrag batch, but
			 * we didn't start at zero.  Make sure we loop
			 * around to the start of the file.
			 */
			defrag->last_offset = 0;
			defrag->cycled = 1;
			__btrfs_add_inode_defrag(inode, defrag);
			defrag = NULL;
		}

		iput(inode);
next:
		spin_lock(&fs_info->defrag_inodes_lock);
next_free:
		kfree(defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);

	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}
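
/*
 * Rough shape of one pass: records are consumed in (root, ino) order,
 * roughly defrag_batch (1024) extents per inode per trip.  An inode
 * that fills its batch is requeued where it left off; one that
 * finishes mid-file is requeued once from offset 0 (the cycled flag)
 * so the region before its original last_offset is not skipped.
 */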

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
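
/*
 * The copy runs with pagefaults disabled, so a fault on the source
 * buffer just makes iov_iter_copy_from_user_atomic() return a short
 * count.  Returning the shortfall (possibly 0) lets the caller fault
 * the user pages back in and retry, rather than faulting while it
 * already holds these pages locked.
 */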

/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
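
/*
 * The start_pos/num_bytes math above just rounds the byte range out
 * to sector boundaries.  For example, with a 4096 byte sectorsize,
 * pos = 5000 and write_bytes = 100 give start_pos = 4096 and
 * num_bytes = 4096: the delalloc range covers the whole sector the
 * write lands in.
 */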

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			       int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		BUG_ON(!split || !split2); /* -ENOMEM */

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;
			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			list_move(&split->list, &em_tree->modified_extents);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			list_move(&split->list, &em_tree->modified_extents);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
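
/*
 * split and split2 are allocated before the tree lock is taken
 * because an extent map that straddles the range can need up to two
 * replacement maps (one for the piece before start, one for the
 * piece after end), and the lock is held while they are inserted.
 * Any unused preallocation is freed on the way out.
 */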

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *hint_byte, int drop_cache)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs = (root->ref_cows || root == root->fs_info->tree_root);

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	if (start >= BTRFS_I(inode)->disk_i_size)
		modify_tree = 0;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				*hint_byte = disk_bytenr;
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0) {
				inode_sub_bytes(inode, end - key.offset);
				*hint_byte = disk_bytenr;
			}
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0) {
				inode_sub_bytes(inode, extent_end - start);
				*hint_byte = disk_bytenr;
			}
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
				*hint_byte = disk_bytenr;
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}

	btrfs_release_path(path);
	return ret;
}
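
/*
 * The loop above handles the four possible overlaps between an extent
 * item and [start, end): range strictly inside the extent (duplicate
 * the item and trim both copies), range covering the extent's tail,
 * range covering its head, and range swallowing it whole (batch the
 * item for deletion).  Deletions accumulate in del_slot/del_nr so
 * adjacent items are removed with a single btrfs_del_items() call.
 */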

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, u64 *hint_byte, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end,
				   hint_byte, drop_cache);
	btrfs_free_path(path);
	return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
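
/*
 * A neighbour is only mergeable when it is a plain (uncompressed,
 * unencrypted) regular extent pointing into the same disk extent at
 * the matching file offset.  On success *start/*end are widened to
 * cover the neighbour, which is how the caller below decides whether
 * a split can be undone by gluing the pieces back together.
 */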

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}
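
/*
 * In the common case a write into the middle of a preallocated extent
 * splits the item in three (before / written / after), the written
 * piece is flipped to BTRFS_FILE_EXTENT_REG, and any piece that now
 * borders a compatible regular extent is merged back via
 * extent_mergeable() to keep the item count down.  All pieces still
 * share the same disk extent; only the reference counts change.
 */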

/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 size_t write_bytes, bool force_uptodate)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				  EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				  GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}
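
/*
 * Only the first and last pages may need reading: a page the write
 * covers completely can skip the read, but a partially written page
 * must be brought uptodate first so its untouched bytes survive.  If
 * an ordered (in-flight) extent overlaps the range, everything is
 * dropped and retried after btrfs_wait_ordered_range() to avoid
 * modifying pages that are still being written out.
 */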

static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool force_page_uptodate = false;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess delalloc
		 * bytes we reserved.  We need to increment outstanding_extents
		 * because btrfs_delalloc_release_space will decrement it, but
		 * we still have an outstanding extent for the chunk we actually
		 * managed to copy.
		 */
1318                 if (num_pages > dirty_pages) {
1319                         if (copied > 0) {
1320                                 spin_lock(&BTRFS_I(inode)->lock);
1321                                 BTRFS_I(inode)->outstanding_extents++;
1322                                 spin_unlock(&BTRFS_I(inode)->lock);
1323                         }
1324                         btrfs_delalloc_release_space(inode,
1325                                         (num_pages - dirty_pages) <<
1326                                         PAGE_CACHE_SHIFT);
1327                 }
1328
1329                 if (copied > 0) {
1330                         ret = btrfs_dirty_pages(root, inode, pages,
1331                                                 dirty_pages, pos, copied,
1332                                                 NULL);
1333                         if (ret) {
1334                                 btrfs_delalloc_release_space(inode,
1335                                         dirty_pages << PAGE_CACHE_SHIFT);
1336                                 btrfs_drop_pages(pages, num_pages);
1337                                 break;
1338                         }
1339                 }
1340
1341                 btrfs_drop_pages(pages, num_pages);
1342
1343                 cond_resched();
1344
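                     /*
                      * throttle against the global dirty page limits and, when
                      * this write dirtied only a few pages, against dirty btree
                      * blocks as well so metadata writeback keeps up with us.
                      */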
1345                 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
1346                                                    dirty_pages);
1347                 if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
1348                         btrfs_btree_balance_dirty(root, 1);
1349
1350                 pos += copied;
1351                 num_written += copied;
1352         }
1353
1354         kfree(pages);
1355
1356         return num_written ? num_written : ret;
1357 }
1358
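     /*
      * O_DIRECT writes: try the direct path first.  If the direct write
      * comes up short, fall back to buffered writes for the remaining
      * bytes, then flush and invalidate that range so a later direct
      * read won't see stale pages.
      */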
1359 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1360                                     const struct iovec *iov,
1361                                     unsigned long nr_segs, loff_t pos,
1362                                     loff_t *ppos, size_t count, size_t ocount)
1363 {
1364         struct file *file = iocb->ki_filp;
1365         struct iov_iter i;
1366         ssize_t written;
1367         ssize_t written_buffered;
1368         loff_t endbyte;
1369         int err;
1370
1371         written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
1372                                             count, ocount);
1373
1374         if (written < 0 || written == count)
1375                 return written;
1376
1377         pos += written;
1378         count -= written;
1379         iov_iter_init(&i, iov, nr_segs, count, written);
1380         written_buffered = __btrfs_buffered_write(file, &i, pos);
1381         if (written_buffered < 0) {
1382                 err = written_buffered;
1383                 goto out;
1384         }
1385         endbyte = pos + written_buffered - 1;
1386         err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
1387         if (err)
1388                 goto out;
1389         written += written_buffered;
1390         *ppos = pos + written_buffered;
1391         invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
1392                                  endbyte >> PAGE_CACHE_SHIFT);
1393 out:
1394         return written ? written : err;
1395 }
1396
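     /*
      * common write path for buffered and O_DIRECT writes.  This holds
      * i_mutex across the generic checks and the copy, expands the file
      * first if the write starts beyond i_size, and stamps last_trans
      * afterwards so a later fsync can tell this inode changed.
      */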
1397 static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1398                                     const struct iovec *iov,
1399                                     unsigned long nr_segs, loff_t pos)
1400 {
1401         struct file *file = iocb->ki_filp;
1402         struct inode *inode = fdentry(file)->d_inode;
1403         struct btrfs_root *root = BTRFS_I(inode)->root;
1404         loff_t *ppos = &iocb->ki_pos;
1405         u64 start_pos;
1406         ssize_t num_written = 0;
1407         ssize_t err = 0;
1408         size_t count, ocount;
1409
1410         sb_start_write(inode->i_sb);
1411
1412         mutex_lock(&inode->i_mutex);
1413
1414         err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
1415         if (err) {
1416                 mutex_unlock(&inode->i_mutex);
1417                 goto out;
1418         }
1419         count = ocount;
1420
1421         current->backing_dev_info = inode->i_mapping->backing_dev_info;
1422         err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1423         if (err) {
1424                 mutex_unlock(&inode->i_mutex);
1425                 goto out;
1426         }
1427
1428         if (count == 0) {
1429                 mutex_unlock(&inode->i_mutex);
1430                 goto out;
1431         }
1432
1433         err = file_remove_suid(file);
1434         if (err) {
1435                 mutex_unlock(&inode->i_mutex);
1436                 goto out;
1437         }
1438
1439         /*
1440          * If BTRFS flips read-only due to some impossible error
1441          * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR), then
1442          * even though we opened the file as writable, we have to
1443          * stop this write operation to ensure FS consistency.
1444          */
1445         if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
1446                 mutex_unlock(&inode->i_mutex);
1447                 err = -EROFS;
1448                 goto out;
1449         }
1450
1451         err = file_update_time(file);
1452         if (err) {
1453                 mutex_unlock(&inode->i_mutex);
1454                 goto out;
1455         }
1456
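             /*
              * if the write starts past the current i_size, expand the
              * file first: zero the tail of the old last block and fill
              * the gap so the range in between reads back as a hole.
              */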
1457         start_pos = round_down(pos, root->sectorsize);
1458         if (start_pos > i_size_read(inode)) {
1459                 err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
1460                 if (err) {
1461                         mutex_unlock(&inode->i_mutex);
1462                         goto out;
1463                 }
1464         }
1465
1466         if (unlikely(file->f_flags & O_DIRECT)) {
1467                 num_written = __btrfs_direct_write(iocb, iov, nr_segs,
1468                                                    pos, ppos, count, ocount);
1469         } else {
1470                 struct iov_iter i;
1471
1472                 iov_iter_init(&i, iov, nr_segs, count, num_written);
1473
1474                 num_written = __btrfs_buffered_write(file, &i, pos);
1475                 if (num_written > 0)
1476                         *ppos = pos + num_written;
1477         }
1478
1479         mutex_unlock(&inode->i_mutex);
1480
1481         /*
1482          * we want to make sure fsync finds this change, but we haven't
1483          * joined a transaction that is running right now.
1484          *
1485          * Later on, someone is sure to update the inode and get the
1486          * real transid recorded.
1487          *
1488          * We set last_trans now to the fs_info generation + 1;
1489          * this will either be one more than the running transaction
1490          * or the generation used for the next transaction if there isn't
1491          * one running right now.
1492          */
1493         BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
1494         if (num_written > 0 || num_written == -EIOCBQUEUED) {
1495                 err = generic_write_sync(file, pos, num_written);
1496                 if (err < 0 && num_written > 0)
1497                         num_written = err;
1498         }
1499 out:
1500         sb_end_write(inode->i_sb);
1501         current->backing_dev_info = NULL;
1502         return num_written ? num_written : err;
1503 }
1504
1505 int btrfs_release_file(struct inode *inode, struct file *filp)
1506 {
1507         /*
1508          * ordered_data_close is set by setattr when we are about to truncate
1509          * a file from a non-zero size to a zero size.  This tries to
1510          * flush down new bytes that may have been written if the
1511          * application was using truncate to replace a file in place.
1512          */
1513         if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1514                                &BTRFS_I(inode)->runtime_flags)) {
1515                 btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
1516                 if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
1517                         filemap_flush(inode->i_mapping);
1518         }
1519         if (filp->private_data)
1520                 btrfs_ioctl_trans_end(filp);
1521         return 0;
1522 }
1523
1524 /*
1525  * fsync call for both files and directories.  This logs the inode into
1526  * the tree log instead of forcing full commits whenever possible.
1527  *
1528  * It needs to call filemap_fdatawait so that all ordered extent updates
1529  * in the metadata btree are up to date for copying to the log.
1530  *
1531  * It drops the inode mutex before doing the tree log commit.  This is an
1532  * important optimization for directories because holding the mutex prevents
1533  * new operations on the dir while we write to disk.
1534  */
1535 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1536 {
1537         struct dentry *dentry = file->f_path.dentry;
1538         struct inode *inode = dentry->d_inode;
1539         struct btrfs_root *root = BTRFS_I(inode)->root;
1540         int ret = 0;
1541         struct btrfs_trans_handle *trans;
1542
1543         trace_btrfs_sync_file(file, datasync);
1544
1545         mutex_lock(&inode->i_mutex);
1546
1547         /*
1548          * we wait first, since the writeback may change the inode; also,
1549          * btrfs_wait_ordered_range does a filemap_write_and_wait_range, which
1550          * is why we don't do it above like other file systems.
1551          */
1552         root->log_batch++;
1553         btrfs_wait_ordered_range(inode, start, end);
1554         root->log_batch++;
1555
1556         /*
1557          * check the transaction that last modified this inode
1558          * and see if it's already been committed
1559          */
1560         if (!BTRFS_I(inode)->last_trans) {
1561                 mutex_unlock(&inode->i_mutex);
1562                 goto out;
1563         }
1564
1565         /*
1566          * if the last transaction that changed this file was before
1567          * the current transaction, we can bail out now without any
1568          * syncing
1569          */
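             /* make sure we see an up to date last_trans before testing it */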
1570         smp_mb();
1571         if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
1572             BTRFS_I(inode)->last_trans <=
1573             root->fs_info->last_trans_committed) {
1574                 BTRFS_I(inode)->last_trans = 0;
1575
1576                 /*
1577                  * We've had everything committed since the last time we were
1578                  * modified, so clear this flag in case it was set for whatever
1579                  * reason; it's no longer relevant.
1580                  */
1581                 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1582                           &BTRFS_I(inode)->runtime_flags);
1583                 mutex_unlock(&inode->i_mutex);
1584                 goto out;
1585         }
1586
1587         /*
1588          * ok, we haven't committed the transaction yet; let's do a commit
1589          */
1590         if (file->private_data)
1591                 btrfs_ioctl_trans_end(file);
1592
1593         trans = btrfs_start_transaction(root, 0);
1594         if (IS_ERR(trans)) {
1595                 ret = PTR_ERR(trans);
1596                 mutex_unlock(&inode->i_mutex);
1597                 goto out;
1598         }
1599
1600         ret = btrfs_log_dentry_safe(trans, root, dentry);
1601         if (ret < 0) {
1602                 mutex_unlock(&inode->i_mutex);
1603                 goto out;
1604         }
1605
1606         /* we've logged all the items and now have a consistent
1607          * version of the file in the log.  It is possible that
1608          * someone will come in and modify the file, but that's
1609          * fine because the log is consistent on disk, and we
1610          * have references to all of the file's extents
1611          *
1612          * It is possible that someone will come in and log the
1613          * file again, but that will end up using the synchronization
1614          * inside btrfs_sync_log to keep things safe.
1615          */
1616         mutex_unlock(&inode->i_mutex);
1617
1618         if (ret != BTRFS_NO_LOG_SYNC) {
1619                 if (ret > 0) {
1620                         ret = btrfs_commit_transaction(trans, root);
1621                 } else {
1622                         ret = btrfs_sync_log(trans, root);
1623                         if (ret == 0)
1624                                 ret = btrfs_end_transaction(trans, root);
1625                         else
1626                                 ret = btrfs_commit_transaction(trans, root);
1627                 }
1628         } else {
1629                 ret = btrfs_end_transaction(trans, root);
1630         }
1631 out:
1632         return ret > 0 ? -EIO : ret;
1633 }
1634
1635 static const struct vm_operations_struct btrfs_file_vm_ops = {
1636         .fault          = filemap_fault,
1637         .page_mkwrite   = btrfs_page_mkwrite,
1638 };
1639
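     /*
      * mmap support.  We need ->readpage to fault pages in, and we hook
      * btrfs_page_mkwrite so delalloc space is reserved before a shared
      * page goes writable.
      */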
1640 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
1641 {
1642         struct address_space *mapping = filp->f_mapping;
1643
1644         if (!mapping->a_ops->readpage)
1645                 return -ENOEXEC;
1646
1647         file_accessed(filp);
1648         vma->vm_ops = &btrfs_file_vm_ops;
1649         vma->vm_flags |= VM_CAN_NONLINEAR;
1650
1651         return 0;
1652 }
1653
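     /*
      * preallocate extents for the range [offset, offset + len).  Only
      * the default mode and FALLOC_FL_KEEP_SIZE are supported, so a
      * caller that wants to reserve space without changing i_size would
      * do something like:
      *
      *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1024 * 1024);
      *
      * We reserve the data space up front, wait out any ordered IO in
      * the range, and then walk the extent maps, preallocating whatever
      * isn't allocated yet.
      */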
1654 static long btrfs_fallocate(struct file *file, int mode,
1655                             loff_t offset, loff_t len)
1656 {
1657         struct inode *inode = file->f_path.dentry->d_inode;
1658         struct extent_state *cached_state = NULL;
1659         u64 cur_offset;
1660         u64 last_byte;
1661         u64 alloc_start;
1662         u64 alloc_end;
1663         u64 alloc_hint = 0;
1664         u64 locked_end;
1665         u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
1666         struct extent_map *em;
1667         int ret;
1668
1669         alloc_start = offset & ~mask;
1670         alloc_end = (offset + len + mask) & ~mask;
1671
1672         /* We only support the FALLOC_FL_KEEP_SIZE mode */
1673         if (mode & ~FALLOC_FL_KEEP_SIZE)
1674                 return -EOPNOTSUPP;
1675
1676         /*
1677          * Make sure we have enough space before we do the
1678          * allocation.
1679          */
1680         ret = btrfs_check_data_free_space(inode, len);
1681         if (ret)
1682                 return ret;
1683
1684         /*
1685          * wait for ordered IO before we have any locks.  We'll loop again
1686          * below with the locks held.
1687          */
1688         btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
1689
1690         mutex_lock(&inode->i_mutex);
1691         ret = inode_newsize_ok(inode, alloc_end);
1692         if (ret)
1693                 goto out;
1694
1695         if (alloc_start > inode->i_size) {
1696                 ret = btrfs_cont_expand(inode, i_size_read(inode),
1697                                         alloc_start);
1698                 if (ret)
1699                         goto out;
1700         }
1701
1702         locked_end = alloc_end - 1;
1703         while (1) {
1704                 struct btrfs_ordered_extent *ordered;
1705
1706                 /* the extent lock is ordered inside the running
1707                  * transaction
1708                  */
1709                 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
1710                                  locked_end, 0, &cached_state);
1711                 ordered = btrfs_lookup_first_ordered_extent(inode,
1712                                                             alloc_end - 1);
1713                 if (ordered &&
1714                     ordered->file_offset + ordered->len > alloc_start &&
1715                     ordered->file_offset < alloc_end) {
1716                         btrfs_put_ordered_extent(ordered);
1717                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1718                                              alloc_start, locked_end,
1719                                              &cached_state, GFP_NOFS);
1720                         /*
1721                          * we can't wait on the range with the transaction
1722                          * running or with the extent lock held
1723                          */
1724                         btrfs_wait_ordered_range(inode, alloc_start,
1725                                                  alloc_end - alloc_start);
1726                 } else {
1727                         if (ordered)
1728                                 btrfs_put_ordered_extent(ordered);
1729                         break;
1730                 }
1731         }
1732
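             /*
              * the range is flushed and locked down, walk the extent maps
              * and preallocate anything that is still a hole or that sits
              * past i_size without already being preallocated.
              */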
1733         cur_offset = alloc_start;
1734         while (1) {
1735                 u64 actual_end;
1736
1737                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
1738                                       alloc_end - cur_offset, 0);
1739                 if (IS_ERR_OR_NULL(em)) {
1740                         if (!em)
1741                                 ret = -ENOMEM;
1742                         else
1743                                 ret = PTR_ERR(em);
1744                         break;
1745                 }
1746                 last_byte = min(extent_map_end(em), alloc_end);
1747                 actual_end = min_t(u64, extent_map_end(em), offset + len);
1748                 last_byte = (last_byte + mask) & ~mask;
1749
1750                 if (em->block_start == EXTENT_MAP_HOLE ||
1751                     (cur_offset >= inode->i_size &&
1752                      !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
1753                         ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
1754                                                         last_byte - cur_offset,
1755                                                         1 << inode->i_blkbits,
1756                                                         offset + len,
1757                                                         &alloc_hint);
1758
1759                         if (ret < 0) {
1760                                 free_extent_map(em);
1761                                 break;
1762                         }
1763                 } else if (actual_end > inode->i_size &&
1764                            !(mode & FALLOC_FL_KEEP_SIZE)) {
1765                         /*
1766                          * We didn't need to allocate any more space, but we
1767                          * still extended the size of the file so we need to
1768                          * update i_size.
1769                          */
1770                         inode->i_ctime = CURRENT_TIME;
1771                         i_size_write(inode, actual_end);
1772                         btrfs_ordered_update_i_size(inode, actual_end, NULL);
1773                 }
1774                 free_extent_map(em);
1775
1776                 cur_offset = last_byte;
1777                 if (cur_offset >= alloc_end) {
1778                         ret = 0;
1779                         break;
1780                 }
1781         }
1782         unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
1783                              &cached_state, GFP_NOFS);
1784 out:
1785         mutex_unlock(&inode->i_mutex);
1786         /* Let go of our reservation. */
1787         btrfs_free_reserved_data_space(inode, len);
1788         return ret;
1789 }
1790
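     /*
      * helper for the SEEK_DATA/SEEK_HOLE cases of llseek.  This walks
      * the extent maps from *offset toward i_size and stores the first
      * data offset (SEEK_DATA) or the first hole (SEEK_HOLE) it finds
      * back into *offset.
      */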
1791 static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
1792 {
1793         struct btrfs_root *root = BTRFS_I(inode)->root;
1794         struct extent_map *em;
1795         struct extent_state *cached_state = NULL;
1796         u64 lockstart = *offset;
1797         u64 lockend = i_size_read(inode);
1798         u64 start = *offset;
1799         u64 orig_start = *offset;
1800         u64 len = i_size_read(inode);
1801         u64 last_end = 0;
1802         int ret = 0;
1803
1804         lockend = max_t(u64, root->sectorsize, lockend);
1805         if (lockend <= lockstart)
1806                 lockend = lockstart + root->sectorsize;
1807
1808         len = lockend - lockstart + 1;
1809
1810         len = max_t(u64, len, root->sectorsize);
1811         if (inode->i_size == 0)
1812                 return -ENXIO;
1813
1814         lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
1815                          &cached_state);
1816
1817         /*
1818          * Delalloc is such a pain.  If we have a hole and we have pending
1819          * delalloc for a portion of the hole we will get back a hole that
1820          * exists for the entire range since it hasn't been actually written
1821          * yet.  So to take care of this case we need to look for an extent just
1822          * before the position we want in case there is outstanding delalloc
1823          * going on here.
1824          */
1825         if (origin == SEEK_HOLE && start != 0) {
1826                 if (start <= root->sectorsize)
1827                         em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
1828                                                      root->sectorsize, 0);
1829                 else
1830                         em = btrfs_get_extent_fiemap(inode, NULL, 0,
1831                                                      start - root->sectorsize,
1832                                                      root->sectorsize, 0);
1833                 if (IS_ERR(em)) {
1834                         ret = PTR_ERR(em);
1835                         goto out;
1836                 }
1837                 last_end = em->start + em->len;
1838                 if (em->block_start == EXTENT_MAP_DELALLOC)
1839                         last_end = min_t(u64, last_end, inode->i_size);
1840                 free_extent_map(em);
1841         }
1842
1843         while (1) {
1844                 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
1845                 if (IS_ERR(em)) {
1846                         ret = PTR_ERR(em);
1847                         break;
1848                 }
1849
1850                 if (em->block_start == EXTENT_MAP_HOLE) {
1851                         if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
1852                                 if (last_end <= orig_start) {
1853                                         free_extent_map(em);
1854                                         ret = -ENXIO;
1855                                         break;
1856                                 }
1857                         }
1858
1859                         if (origin == SEEK_HOLE) {
1860                                 *offset = start;
1861                                 free_extent_map(em);
1862                                 break;
1863                         }
1864                 } else {
1865                         if (origin == SEEK_DATA) {
1866                                 if (em->block_start == EXTENT_MAP_DELALLOC) {
1867                                         if (start >= inode->i_size) {
1868                                                 free_extent_map(em);
1869                                                 ret = -ENXIO;
1870                                                 break;
1871                                         }
1872                                 }
1873
1874                                 *offset = start;
1875                                 free_extent_map(em);
1876                                 break;
1877                         }
1878                 }
1879
1880                 start = em->start + em->len;
1881                 last_end = em->start + em->len;
1882
1883                 if (em->block_start == EXTENT_MAP_DELALLOC)
1884                         last_end = min_t(u64, last_end, inode->i_size);
1885
1886                 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
1887                         free_extent_map(em);
1888                         ret = -ENXIO;
1889                         break;
1890                 }
1891                 free_extent_map(em);
1892                 cond_resched();
1893         }
1894         if (!ret)
1895                 *offset = min(*offset, inode->i_size);
1896 out:
1897         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1898                              &cached_state, GFP_NOFS);
1899         return ret;
1900 }
1901
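     /*
      * llseek, with SEEK_DATA/SEEK_HOLE handled on top of the generic
      * cases.  Userspace can use the pair to skip holes while copying,
      * roughly:
      *
      *	off_t data = lseek(fd, pos, SEEK_DATA);
      *	off_t hole = lseek(fd, data, SEEK_HOLE);
      *
      * both fail with ENXIO once the requested offset is at or past EOF.
      */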
1902 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
1903 {
1904         struct inode *inode = file->f_mapping->host;
1905         int ret;
1906
1907         mutex_lock(&inode->i_mutex);
1908         switch (origin) {
1909         case SEEK_END:
1910         case SEEK_CUR:
1911                 offset = generic_file_llseek(file, offset, origin);
1912                 goto out;
1913         case SEEK_DATA:
1914         case SEEK_HOLE:
1915                 if (offset >= i_size_read(inode)) {
1916                         mutex_unlock(&inode->i_mutex);
1917                         return -ENXIO;
1918                 }
1919
1920                 ret = find_desired_extent(inode, &offset, origin);
1921                 if (ret) {
1922                         mutex_unlock(&inode->i_mutex);
1923                         return ret;
1924                 }
1925         }
1926
1927         if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
1928                 offset = -EINVAL;
1929                 goto out;
1930         }
1931         if (offset > inode->i_sb->s_maxbytes) {
1932                 offset = -EINVAL;
1933                 goto out;
1934         }
1935
1936         /* Special lock needed here? */
1937         if (offset != file->f_pos) {
1938                 file->f_pos = offset;
1939                 file->f_version = 0;
1940         }
1941 out:
1942         mutex_unlock(&inode->i_mutex);
1943         return offset;
1944 }
1945
1946 const struct file_operations btrfs_file_operations = {
1947         .llseek         = btrfs_file_llseek,
1948         .read           = do_sync_read,
1949         .write          = do_sync_write,
1950         .aio_read       = generic_file_aio_read,
1951         .splice_read    = generic_file_splice_read,
1952         .aio_write      = btrfs_file_aio_write,
1953         .mmap           = btrfs_file_mmap,
1954         .open           = generic_file_open,
1955         .release        = btrfs_release_file,
1956         .fsync          = btrfs_sync_file,
1957         .fallocate      = btrfs_fallocate,
1958         .unlocked_ioctl = btrfs_ioctl,
1959 #ifdef CONFIG_COMPAT
1960         .compat_ioctl   = btrfs_ioctl,
1961 #endif
1962 };