/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "hash.h"
#include "props.h"
#include "qgroup.h"
#include "dedupe.h"

struct btrfs_iget_args {
        struct btrfs_key *location;
        struct btrfs_root *root;
};

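/*
 * state carried through the stages of a direct IO write: reservation
 * accounting (outstanding extents and reserved bytes) plus the range of
 * ordered extents created but not yet submitted, so it can be cleaned
 * up if the write is aborted partway through
 */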
struct btrfs_dio_data {
        u64 outstanding_extents;
        u64 reserve;
        u64 unsubmitted_oe_range_start;
        u64 unsubmitted_oe_range_end;
        int overwrite;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static const struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

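/* map the S_IFMT bits of an inode mode to btrfs directory entry types */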
#define S_SHIFT 12
static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
        [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
        [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
        [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
        [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
        [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
};

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, u64 delalloc_end,
                                   int *page_started, unsigned long *nr_written,
                                   int unlock, struct btrfs_dedupe_hash *hash);
static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
                                       u64 orig_start, u64 block_start,
                                       u64 block_len, u64 orig_block_len,
                                       u64 ram_bytes, int compress_type,
                                       int type);

static int btrfs_dirty_inode(struct inode *inode);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_inode_set_ops(struct inode *inode)
{
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
#endif

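/*
 * set up the ACLs inherited from the parent directory and the security
 * xattrs for a newly created inode
 */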
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
                                     struct inode *inode, struct inode *dir,
                                     const struct qstr *qstr)
{
        int err;

        err = btrfs_init_acl(trans, inode, dir);
        if (!err)
                err = btrfs_xattr_security_init(trans, inode, dir, qstr);
        return err;
}

/*
 * This does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_path *path, int extent_inserted,
                                struct btrfs_root *root, struct inode *inode,
                                u64 start, size_t size, size_t compressed_size,
                                int compress_type,
                                struct page **compressed_pages)
{
        struct extent_buffer *leaf;
        struct page *page = NULL;
        char *kaddr;
        unsigned long ptr;
        struct btrfs_file_extent_item *ei;
        int err = 0;
        int ret;
        size_t cur_size = size;
        unsigned long offset;

        if (compressed_size && compressed_pages)
                cur_size = compressed_size;

        inode_add_bytes(inode, size);

        if (!extent_inserted) {
                struct btrfs_key key;
                size_t datasize;

                key.objectid = btrfs_ino(BTRFS_I(inode));
                key.offset = start;
                key.type = BTRFS_EXTENT_DATA_KEY;

                datasize = btrfs_file_extent_calc_inline_size(cur_size);
                path->leave_spinning = 1;
                ret = btrfs_insert_empty_item(trans, root, path, &key,
                                              datasize);
                if (ret) {
                        err = ret;
                        goto fail;
                }
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, ei, trans->transid);
        btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
        btrfs_set_file_extent_encryption(leaf, ei, 0);
        btrfs_set_file_extent_other_encoding(leaf, ei, 0);
        btrfs_set_file_extent_ram_bytes(leaf, ei, size);
        ptr = btrfs_file_extent_inline_start(ei);

        if (compress_type != BTRFS_COMPRESS_NONE) {
                struct page *cpage;
                int i = 0;
                while (compressed_size > 0) {
                        cpage = compressed_pages[i];
                        cur_size = min_t(unsigned long, compressed_size,
                                         PAGE_SIZE);

                        kaddr = kmap_atomic(cpage);
                        write_extent_buffer(leaf, kaddr, ptr, cur_size);
                        kunmap_atomic(kaddr);

                        i++;
                        ptr += cur_size;
                        compressed_size -= cur_size;
                }
                btrfs_set_file_extent_compression(leaf, ei,
                                                  compress_type);
        } else {
                page = find_get_page(inode->i_mapping,
                                     start >> PAGE_SHIFT);
                btrfs_set_file_extent_compression(leaf, ei, 0);
                kaddr = kmap_atomic(page);
                offset = start & (PAGE_SIZE - 1);
                write_extent_buffer(leaf, kaddr + offset, ptr, size);
                kunmap_atomic(kaddr);
                put_page(page);
        }
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        /*
         * we're an inline extent, so nobody can
         * extend the file past i_size without locking
         * a page we already have locked.
         *
         * We must do any isize and inode updates
         * before we unlock the pages.  Otherwise we
         * could end up racing with unlink.
         */
        BTRFS_I(inode)->disk_i_size = inode->i_size;
        ret = btrfs_update_inode(trans, root, inode);

        return ret;
fail:
        return err;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_root *root,
                                          struct inode *inode, u64 start,
                                          u64 end, size_t compressed_size,
                                          int compress_type,
                                          struct page **compressed_pages)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_trans_handle *trans;
        u64 isize = i_size_read(inode);
        u64 actual_end = min(end + 1, isize);
        u64 inline_len = actual_end - start;
        u64 aligned_end = ALIGN(end, fs_info->sectorsize);
        u64 data_len = inline_len;
        int ret;
        struct btrfs_path *path;
        int extent_inserted = 0;
        u32 extent_item_size;

        if (compressed_size)
                data_len = compressed_size;

        if (start > 0 ||
            actual_end > fs_info->sectorsize ||
            data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
            (!compressed_size &&
            (actual_end & (fs_info->sectorsize - 1)) == 0) ||
            end + 1 < isize ||
            data_len > fs_info->max_inline) {
                return 1;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }
        trans->block_rsv = &fs_info->delalloc_block_rsv;

        if (compressed_size && compressed_pages)
                extent_item_size = btrfs_file_extent_calc_inline_size(
                   compressed_size);
        else
                extent_item_size = btrfs_file_extent_calc_inline_size(
                    inline_len);

        ret = __btrfs_drop_extents(trans, root, inode, path,
                                   start, aligned_end, NULL,
                                   1, 1, extent_item_size, &extent_inserted);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto out;
        }

        if (isize > actual_end)
                inline_len = min_t(u64, isize, actual_end);
        ret = insert_inline_extent(trans, path, extent_inserted,
                                   root, inode, start,
                                   inline_len, compressed_size,
                                   compress_type, compressed_pages);
        if (ret && ret != -ENOSPC) {
                btrfs_abort_transaction(trans, ret);
                goto out;
        } else if (ret == -ENOSPC) {
                ret = 1;
                goto out;
        }

        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
        btrfs_delalloc_release_metadata(BTRFS_I(inode), end + 1 - start);
        btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0);
out:
        /*
         * Don't forget to free the reserved space: an inlined extent
         * doesn't count as a data extent, so free the space directly here.
         * At reserve time the space is always aligned to page size, so
         * just free one page here.
         */
        btrfs_qgroup_free_data(inode, 0, PAGE_SIZE);
        btrfs_free_path(path);
        btrfs_end_transaction(trans);
        return ret;
}

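/*
 * one range handed from the compression phase to the submit phase of
 * async writeback.  A NULL pages array means compression was skipped or
 * failed and the range falls back to uncompressed IO.
 */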
struct async_extent {
        u64 start;
        u64 ram_size;
        u64 compressed_size;
        struct page **pages;
        unsigned long nr_pages;
        int compress_type;
        struct list_head list;
};

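/*
 * one chunk of delalloc work queued to the workers: the range to COW
 * and the list of async_extents produced while compressing it
 */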
struct async_cow {
        struct inode *inode;
        struct btrfs_root *root;
        struct page *locked_page;
        u64 start;
        u64 end;
        struct list_head extents;
        struct btrfs_work work;
};

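/* queue a compressed (or uncompressed fallback) range on the async_cow list */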
static noinline int add_async_extent(struct async_cow *cow,
                                     u64 start, u64 ram_size,
                                     u64 compressed_size,
                                     struct page **pages,
                                     unsigned long nr_pages,
                                     int compress_type)
{
        struct async_extent *async_extent;

        async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
        BUG_ON(!async_extent); /* -ENOMEM */
        async_extent->start = start;
        async_extent->ram_size = ram_size;
        async_extent->compressed_size = compressed_size;
        async_extent->pages = pages;
        async_extent->nr_pages = nr_pages;
        async_extent->compress_type = compress_type;
        list_add_tail(&async_extent->list, &cow->extents);
        return 0;
}

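/* return 1 if writes to this inode should go through the compression path */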
static inline int inode_need_compress(struct inode *inode)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

        /* force compress */
        if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
                return 1;
        /* bad compression ratios */
        if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
                return 0;
        if (btrfs_test_opt(fs_info, COMPRESS) ||
            BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
            BTRFS_I(inode)->force_compress)
                return 1;
        return 0;
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
                u64 start, u64 end, u64 num_bytes, u64 small_write)
{
        /* If this is a small write inside eof, kick off a defrag */
        if (num_bytes < small_write &&
            (start > 0 || end + 1 < inode->disk_i_size))
                btrfs_add_inode_defrag(NULL, inode);
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline void compress_file_range(struct inode *inode,
                                        struct page *locked_page,
                                        u64 start, u64 end,
                                        struct async_cow *async_cow,
                                        int *num_added)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 num_bytes;
        u64 blocksize = fs_info->sectorsize;
        u64 actual_end;
        u64 isize = i_size_read(inode);
        int ret = 0;
        struct page **pages = NULL;
        unsigned long nr_pages;
        unsigned long total_compressed = 0;
        unsigned long total_in = 0;
        int i;
        int will_compress;
        int compress_type = fs_info->compress_type;
        int redirty = 0;

        inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
                        SZ_16K);

        actual_end = min_t(u64, isize, end + 1);
again:
        will_compress = 0;
        nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
        BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
        nr_pages = min_t(unsigned long, nr_pages,
                        BTRFS_MAX_COMPRESSED / PAGE_SIZE);

        /*
         * we don't want to send crud past the end of i_size through
         * compression, that's just a waste of CPU time.  So, if the
         * end of the file is before the start of our current
         * requested range of bytes, we bail out to the uncompressed
         * cleanup code that can deal with all of this.
         *
         * It isn't really the fastest way to fix things, but this is a
         * very uncommon corner.
         */
        if (actual_end <= start)
                goto cleanup_and_bail_uncompressed;

        total_compressed = actual_end - start;

        /*
         * skip compression for a small file range (<= blocksize) that
         * isn't an inline extent, since it doesn't save disk space at all.
         */
        if (total_compressed <= blocksize &&
           (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                goto cleanup_and_bail_uncompressed;

        total_compressed = min_t(unsigned long, total_compressed,
                        BTRFS_MAX_UNCOMPRESSED);
        num_bytes = ALIGN(end - start + 1, blocksize);
        num_bytes = max(blocksize, num_bytes);
        total_in = 0;
        ret = 0;

        /*
         * we do compression for mount -o compress and when the
         * inode has not been flagged as nocompress.  This flag can
         * change at any time if we discover bad compression ratios.
         */
        if (inode_need_compress(inode)) {
                WARN_ON(pages);
                pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
                if (!pages) {
                        /* just bail out to the uncompressed code */
                        goto cont;
                }

                if (BTRFS_I(inode)->force_compress)
                        compress_type = BTRFS_I(inode)->force_compress;

                /*
                 * we need to call clear_page_dirty_for_io on each
                 * page in the range.  Otherwise applications with the file
                 * mmap'd can wander in and change the page contents while
                 * we are compressing them.
                 *
                 * If the compression fails for any reason, we set the pages
                 * dirty again later on.
                 */
                extent_range_clear_dirty_for_io(inode, start, end);
                redirty = 1;
                ret = btrfs_compress_pages(compress_type,
                                           inode->i_mapping, start,
                                           pages,
                                           &nr_pages,
                                           &total_in,
                                           &total_compressed);

                if (!ret) {
                        unsigned long offset = total_compressed &
                                (PAGE_SIZE - 1);
                        struct page *page = pages[nr_pages - 1];
                        char *kaddr;

                        /* zero the tail end of the last page, we might be
                         * sending it down to disk
                         */
                        if (offset) {
                                kaddr = kmap_atomic(page);
                                memset(kaddr + offset, 0,
                                       PAGE_SIZE - offset);
                                kunmap_atomic(kaddr);
                        }
                        will_compress = 1;
                }
        }
cont:
        if (start == 0) {
                /* let's try to make an inline extent */
                if (ret || total_in < (actual_end - start)) {
                        /* we didn't compress the entire range, try
                         * to make an uncompressed inline extent.
                         */
                        ret = cow_file_range_inline(root, inode, start, end,
                                            0, BTRFS_COMPRESS_NONE, NULL);
                } else {
                        /* try making a compressed inline extent */
                        ret = cow_file_range_inline(root, inode, start, end,
                                                    total_compressed,
                                                    compress_type, pages);
                }
                if (ret <= 0) {
                        unsigned long clear_flags = EXTENT_DELALLOC |
                                EXTENT_DEFRAG;
                        unsigned long page_error_op;

                        clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
                        page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

                        /*
                         * inline extent creation worked or returned an error,
                         * so we don't need to create any more async work items.
                         * Unlock and free up our temp pages.
                         */
                        extent_clear_unlock_delalloc(inode, start, end, end,
                                                     NULL, clear_flags,
                                                     PAGE_UNLOCK |
                                                     PAGE_CLEAR_DIRTY |
                                                     PAGE_SET_WRITEBACK |
                                                     page_error_op |
                                                     PAGE_END_WRITEBACK);
                        btrfs_free_reserved_data_space_noquota(inode, start,
                                                end - start + 1);
                        goto free_pages_out;
                }
        }

        if (will_compress) {
                /*
                 * we aren't doing an inline extent, so round the compressed
                 * size up to a block size boundary so the allocator does
                 * sane things
                 */
                total_compressed = ALIGN(total_compressed, blocksize);

                /*
                 * one last check to make sure the compression is really a
                 * win, compare the page count read with the blocks on disk
                 */
                total_in = ALIGN(total_in, PAGE_SIZE);
                if (total_compressed >= total_in) {
                        will_compress = 0;
                } else {
                        num_bytes = total_in;
                        *num_added += 1;

                        /*
                         * The async work queues will take care of doing actual
                         * allocation on disk for these compressed pages, and
                         * will submit them to the elevator.
                         */
                        add_async_extent(async_cow, start, num_bytes,
                                        total_compressed, pages, nr_pages,
                                        compress_type);

                        if (start + num_bytes < end) {
                                start += num_bytes;
                                pages = NULL;
                                cond_resched();
                                goto again;
                        }
                        return;
                }
        }
        if (pages) {
                /*
                 * the compression code ran but failed to make things smaller,
                 * free any pages it allocated and our page pointer array
                 */
                for (i = 0; i < nr_pages; i++) {
                        WARN_ON(pages[i]->mapping);
                        put_page(pages[i]);
                }
                kfree(pages);
                pages = NULL;
                total_compressed = 0;
                nr_pages = 0;

                /* flag the file so we don't compress in the future */
                if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
                    !(BTRFS_I(inode)->force_compress)) {
                        BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
                }
        }
cleanup_and_bail_uncompressed:
        /*
         * No compression, but we still need to write the pages in the file
         * we've been given so far.  redirty the locked page if it corresponds
         * to our extent and set things up for the async work queue to run
         * cow_file_range to do the normal delalloc dance.
         */
        if (page_offset(locked_page) >= start &&
            page_offset(locked_page) <= end)
                __set_page_dirty_nobuffers(locked_page);
                /* unlocked later on in the async handlers */

        if (redirty)
                extent_range_redirty_for_io(inode, start, end);
        add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0,
                         BTRFS_COMPRESS_NONE);
        *num_added += 1;

        return;

free_pages_out:
        for (i = 0; i < nr_pages; i++) {
                WARN_ON(pages[i]->mapping);
                put_page(pages[i]);
        }
        kfree(pages);
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
        int i;

        if (!async_extent->pages)
                return;

        for (i = 0; i < async_extent->nr_pages; i++) {
                WARN_ON(async_extent->pages[i]->mapping);
                put_page(async_extent->pages[i]);
        }
        kfree(async_extent->pages);
        async_extent->nr_pages = 0;
        async_extent->pages = NULL;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct inode *inode,
                                              struct async_cow *async_cow)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct async_extent *async_extent;
        u64 alloc_hint = 0;
        struct btrfs_key ins;
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_io_tree *io_tree;
        int ret = 0;

again:
        while (!list_empty(&async_cow->extents)) {
                async_extent = list_entry(async_cow->extents.next,
                                          struct async_extent, list);
                list_del(&async_extent->list);

                io_tree = &BTRFS_I(inode)->io_tree;

retry:
                /* did the compression code fall back to uncompressed IO? */
                if (!async_extent->pages) {
                        int page_started = 0;
                        unsigned long nr_written = 0;

                        lock_extent(io_tree, async_extent->start,
                                         async_extent->start +
                                         async_extent->ram_size - 1);

                        /* allocate blocks */
                        ret = cow_file_range(inode, async_cow->locked_page,
                                             async_extent->start,
                                             async_extent->start +
                                             async_extent->ram_size - 1,
                                             async_extent->start +
                                             async_extent->ram_size - 1,
                                             &page_started, &nr_written, 0,
                                             NULL);

                        /* JDM XXX */

                        /*
                         * if page_started, cow_file_range inserted an
                         * inline extent and took care of all the unlocking
                         * and IO for us.  Otherwise, we need to submit
                         * all those pages down to the drive.
                         */
                        if (!page_started && !ret)
                                extent_write_locked_range(io_tree,
                                                  inode, async_extent->start,
                                                  async_extent->start +
                                                  async_extent->ram_size - 1,
                                                  btrfs_get_extent,
                                                  WB_SYNC_ALL);
                        else if (ret)
                                unlock_page(async_cow->locked_page);
                        kfree(async_extent);
                        cond_resched();
                        continue;
                }

                lock_extent(io_tree, async_extent->start,
                            async_extent->start + async_extent->ram_size - 1);

                ret = btrfs_reserve_extent(root, async_extent->ram_size,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
                                           0, alloc_hint, &ins, 1, 1);
                if (ret) {
                        free_async_extent_pages(async_extent);

                        if (ret == -ENOSPC) {
                                unlock_extent(io_tree, async_extent->start,
                                              async_extent->start +
                                              async_extent->ram_size - 1);

                                /*
                                 * we need to redirty the pages if we decide to
                                 * fall back to uncompressed IO, otherwise we
                                 * will not submit these pages down to lower
                                 * layers.
                                 */
                                extent_range_redirty_for_io(inode,
                                                async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1);

                                goto retry;
                        }
                        goto out_free;
                }
                /*
                 * here we're doing allocation and writeback of the
                 * compressed pages
                 */
                em = create_io_em(inode, async_extent->start,
                                  async_extent->ram_size, /* len */
                                  async_extent->start, /* orig_start */
                                  ins.objectid, /* block_start */
                                  ins.offset, /* block_len */
                                  ins.offset, /* orig_block_len */
                                  async_extent->ram_size, /* ram_bytes */
                                  async_extent->compress_type,
                                  BTRFS_ORDERED_COMPRESSED);
                if (IS_ERR(em))
                        /* ret value is not needed, this function returns void */
                        goto out_free_reserve;
                free_extent_map(em);

                ret = btrfs_add_ordered_extent_compress(inode,
                                                async_extent->start,
                                                ins.objectid,
                                                async_extent->ram_size,
                                                ins.offset,
                                                BTRFS_ORDERED_COMPRESSED,
                                                async_extent->compress_type);
                if (ret) {
                        btrfs_drop_extent_cache(BTRFS_I(inode),
                                                async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1, 0);
                        goto out_free_reserve;
                }
                btrfs_dec_block_group_reservations(fs_info, ins.objectid);

                /*
                 * clear dirty, set writeback and unlock the pages.
                 */
                extent_clear_unlock_delalloc(inode, async_extent->start,
                                async_extent->start +
                                async_extent->ram_size - 1,
                                async_extent->start +
                                async_extent->ram_size - 1,
                                NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
                                PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                PAGE_SET_WRITEBACK);
                ret = btrfs_submit_compressed_write(inode,
                                    async_extent->start,
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
                                    async_extent->nr_pages);
                if (ret) {
                        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
                        struct page *p = async_extent->pages[0];
                        const u64 start = async_extent->start;
                        const u64 end = start + async_extent->ram_size - 1;

                        p->mapping = inode->i_mapping;
                        tree->ops->writepage_end_io_hook(p, start, end,
                                                         NULL, 0);
                        p->mapping = NULL;
                        extent_clear_unlock_delalloc(inode, start, end, end,
                                                     NULL, 0,
                                                     PAGE_END_WRITEBACK |
                                                     PAGE_SET_ERROR);
                        free_async_extent_pages(async_extent);
                }
                alloc_hint = ins.objectid + ins.offset;
                kfree(async_extent);
                cond_resched();
        }
        return;
out_free_reserve:
        btrfs_dec_block_group_reservations(fs_info, ins.objectid);
        btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
        extent_clear_unlock_delalloc(inode, async_extent->start,
                                     async_extent->start +
                                     async_extent->ram_size - 1,
                                     async_extent->start +
                                     async_extent->ram_size - 1,
                                     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
                                     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
                                     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
                                     PAGE_SET_ERROR);
        free_async_extent_pages(async_extent);
        kfree(async_extent);
        goto again;
}

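/*
 * pick a disk block number near @start to use as an allocation hint,
 * based on the extent mappings already cached for this inode
 */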
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
                                      u64 num_bytes)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        u64 alloc_hint = 0;

        read_lock(&em_tree->lock);
        em = search_extent_mapping(em_tree, start, num_bytes);
        if (em) {
                /*
                 * if block start isn't an actual block number then find the
                 * first block in this inode and use that as a hint.  If that
                 * block is also bogus then just don't worry about it.
                 */
                if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
                        free_extent_map(em);
                        em = search_extent_mapping(em_tree, 0, 0);
                        if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
                                alloc_hint = em->block_start;
                        if (em)
                                free_extent_map(em);
                } else {
                        alloc_hint = em->block_start;
                        free_extent_map(em);
                }
        }
        read_unlock(&em_tree->lock);

        return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, u64 delalloc_end,
                                   int *page_started, unsigned long *nr_written,
                                   int unlock, struct btrfs_dedupe_hash *hash)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 alloc_hint = 0;
        u64 num_bytes;
        unsigned long ram_size;
        u64 disk_num_bytes;
        u64 cur_alloc_size;
        u64 blocksize = fs_info->sectorsize;
        struct btrfs_key ins;
        struct extent_map *em;
        int ret = 0;

        if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
                WARN_ON_ONCE(1);
                ret = -EINVAL;
                goto out_unlock;
        }

        num_bytes = ALIGN(end - start + 1, blocksize);
        num_bytes = max(blocksize, num_bytes);
        disk_num_bytes = num_bytes;

        inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K);

        if (start == 0) {
                /* let's try to make an inline extent */
                ret = cow_file_range_inline(root, inode, start, end, 0,
                                        BTRFS_COMPRESS_NONE, NULL);
                if (ret == 0) {
                        extent_clear_unlock_delalloc(inode, start, end,
                                     delalloc_end, NULL,
                                     EXTENT_LOCKED | EXTENT_DELALLOC |
                                     EXTENT_DEFRAG, PAGE_UNLOCK |
                                     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
                                     PAGE_END_WRITEBACK);
                        btrfs_free_reserved_data_space_noquota(inode, start,
                                                end - start + 1);
                        *nr_written = *nr_written +
                             (end - start + PAGE_SIZE) / PAGE_SIZE;
                        *page_started = 1;
                        goto out;
                } else if (ret < 0) {
                        goto out_unlock;
                }
        }

        BUG_ON(disk_num_bytes >
               btrfs_super_total_bytes(fs_info->super_copy));

        alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
        btrfs_drop_extent_cache(BTRFS_I(inode), start,
                        start + num_bytes - 1, 0);

        while (disk_num_bytes > 0) {
                unsigned long op;

                cur_alloc_size = disk_num_bytes;
                ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
                                           fs_info->sectorsize, 0, alloc_hint,
                                           &ins, 1, 1);
                if (ret < 0)
                        goto out_unlock;

                ram_size = ins.offset;
                em = create_io_em(inode, start, ins.offset, /* len */
                                  start, /* orig_start */
                                  ins.objectid, /* block_start */
                                  ins.offset, /* block_len */
                                  ins.offset, /* orig_block_len */
                                  ram_size, /* ram_bytes */
                                  BTRFS_COMPRESS_NONE, /* compress_type */
                                  BTRFS_ORDERED_REGULAR /* type */);
                if (IS_ERR(em))
                        goto out_reserve;
                free_extent_map(em);

                cur_alloc_size = ins.offset;
                ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
                                               ram_size, cur_alloc_size, 0);
                if (ret)
                        goto out_drop_extent_cache;

                if (root->root_key.objectid ==
                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                        ret = btrfs_reloc_clone_csums(inode, start,
                                                      cur_alloc_size);
                        if (ret)
                                goto out_drop_extent_cache;
                }

                btrfs_dec_block_group_reservations(fs_info, ins.objectid);

                if (disk_num_bytes < cur_alloc_size)
                        break;

                /* we're not doing compressed IO, don't unlock the first
                 * page (which the caller expects to stay locked), don't
                 * clear any dirty bits and don't set any writeback bits
                 *
                 * Do set the Private2 bit so we know this page was properly
                 * setup for writepage
                 */
                op = unlock ? PAGE_UNLOCK : 0;
                op |= PAGE_SET_PRIVATE2;

                extent_clear_unlock_delalloc(inode, start,
                                             start + ram_size - 1,
                                             delalloc_end, locked_page,
                                             EXTENT_LOCKED | EXTENT_DELALLOC,
                                             op);
                disk_num_bytes -= cur_alloc_size;
                num_bytes -= cur_alloc_size;
                alloc_hint = ins.objectid + ins.offset;
                start += cur_alloc_size;
        }
out:
        return ret;

out_drop_extent_cache:
        btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0);
out_reserve:
        btrfs_dec_block_group_reservations(fs_info, ins.objectid);
        btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
        extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
                                     locked_page,
                                     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
                                     EXTENT_DELALLOC | EXTENT_DEFRAG,
                                     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
        goto out;
}

/*
 * work queue callback to start compression on a file's pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        int num_added = 0;
        async_cow = container_of(work, struct async_cow, work);

        compress_file_range(async_cow->inode, async_cow->locked_page,
                            async_cow->start, async_cow->end, async_cow,
                            &num_added);
        if (num_added == 0) {
                btrfs_add_delayed_iput(async_cow->inode);
                async_cow->inode = NULL;
        }
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info;
        struct async_cow *async_cow;
        struct btrfs_root *root;
        unsigned long nr_pages;

        async_cow = container_of(work, struct async_cow, work);

        root = async_cow->root;
        fs_info = root->fs_info;
        nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
                PAGE_SHIFT;

        /*
         * atomic_sub_return implies a barrier for waitqueue_active
         */
        if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
            5 * SZ_1M &&
            waitqueue_active(&fs_info->async_submit_wait))
                wake_up(&fs_info->async_submit_wait);

        if (async_cow->inode)
                submit_compressed_extents(async_cow->inode, async_cow);
}

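/* work queue callback to drop the inode reference and free an async_cow */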
static noinline void async_cow_free(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        async_cow = container_of(work, struct async_cow, work);
        if (async_cow->inode)
                btrfs_add_delayed_iput(async_cow->inode);
        kfree(async_cow);
}

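/*
 * split the delalloc range into chunks (up to 512K each unless the inode
 * is flagged nocompress) and queue each chunk as async_cow work, so the
 * compression in async_cow_start can run across many cpus
 */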
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                                u64 start, u64 end, int *page_started,
                                unsigned long *nr_written)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct async_cow *async_cow;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        unsigned long nr_pages;
        u64 cur_end;

        clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
                         1, 0, NULL, GFP_NOFS);
        while (start < end) {
                async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
                BUG_ON(!async_cow); /* -ENOMEM */
                async_cow->inode = igrab(inode);
                async_cow->root = root;
                async_cow->locked_page = locked_page;
                async_cow->start = start;

                if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
                    !btrfs_test_opt(fs_info, FORCE_COMPRESS))
                        cur_end = end;
                else
                        cur_end = min(end, start + SZ_512K - 1);

                async_cow->end = cur_end;
                INIT_LIST_HEAD(&async_cow->extents);

                btrfs_init_work(&async_cow->work,
                                btrfs_delalloc_helper,
                                async_cow_start, async_cow_submit,
                                async_cow_free);

                nr_pages = (cur_end - start + PAGE_SIZE) >>
                        PAGE_SHIFT;
                atomic_add(nr_pages, &fs_info->async_delalloc_pages);

                btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);

                while (atomic_read(&fs_info->async_submit_draining) &&
                       atomic_read(&fs_info->async_delalloc_pages)) {
                        wait_event(fs_info->async_submit_wait,
                                   (atomic_read(&fs_info->async_delalloc_pages) ==
                                    0));
                }

                *nr_written += nr_pages;
                start = cur_end + 1;
        }
        *page_started = 1;
        return 0;
}

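/*
 * return 1 if any checksums exist for the byte range, 0 otherwise; the
 * csums themselves are looked up only to be freed again
 */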
static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
                                        u64 bytenr, u64 num_bytes)
{
        int ret;
        struct btrfs_ordered_sum *sums;
        LIST_HEAD(list);

        ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
                                       bytenr + num_bytes - 1, &list, 0);
        if (ret == 0 && list_empty(&list))
                return 0;

        while (!list_empty(&list)) {
                sums = list_entry(list.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
        return 1;
}

/*
 * callback for the nocow writeback path.  This checks for snapshots or
 * COW copies of the extents that exist in the file, and COWs the file
 * as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
                                       struct page *locked_page,
                                       u64 start, u64 end, int *page_started,
                                       int force, unsigned long *nr_written)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key found_key;
        struct extent_map *em;
        u64 cow_start;
        u64 cur_offset;
        u64 extent_end;
        u64 extent_offset;
        u64 disk_bytenr;
        u64 num_bytes;
        u64 disk_num_bytes;
        u64 ram_bytes;
        int extent_type;
        int ret, err;
        int type;
        int nocow;
        int check_prev = 1;
        bool nolock;
        u64 ino = btrfs_ino(BTRFS_I(inode));

        path = btrfs_alloc_path();
        if (!path) {
                extent_clear_unlock_delalloc(inode, start, end, end,
                                             locked_page,
                                             EXTENT_LOCKED | EXTENT_DELALLOC |
                                             EXTENT_DO_ACCOUNTING |
                                             EXTENT_DEFRAG, PAGE_UNLOCK |
                                             PAGE_CLEAR_DIRTY |
                                             PAGE_SET_WRITEBACK |
                                             PAGE_END_WRITEBACK);
                return -ENOMEM;
        }

        nolock = btrfs_is_free_space_inode(BTRFS_I(inode));

        cow_start = (u64)-1;
        cur_offset = start;
        while (1) {
                ret = btrfs_lookup_file_extent(NULL, root, path, ino,
                                               cur_offset, 0);
                if (ret < 0)
                        goto error;
                if (ret > 0 && path->slots[0] > 0 && check_prev) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0] - 1);
                        if (found_key.objectid == ino &&
                            found_key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                check_prev = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto error;
                        if (ret > 0)
                                break;
                        leaf = path->nodes[0];
                }

                nocow = 0;
                disk_bytenr = 0;
                num_bytes = 0;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

                if (found_key.objectid > ino)
                        break;
                if (WARN_ON_ONCE(found_key.objectid < ino) ||
                    found_key.type < BTRFS_EXTENT_DATA_KEY) {
                        path->slots[0]++;
                        goto next_slot;
                }
                if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
                    found_key.offset > end)
                        break;

                if (found_key.offset > cur_offset) {
                        extent_end = found_key.offset;
                        extent_type = 0;
                        goto out_check;
                }

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = found_key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                        disk_num_bytes =
                                btrfs_file_extent_disk_num_bytes(leaf, fi);
                        if (extent_end <= start) {
                                path->slots[0]++;
                                goto next_slot;
                        }
                        if (disk_bytenr == 0)
                                goto out_check;
                        if (btrfs_file_extent_compression(leaf, fi) ||
                            btrfs_file_extent_encryption(leaf, fi) ||
                            btrfs_file_extent_other_encoding(leaf, fi))
                                goto out_check;
                        if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
                                goto out_check;
                        if (btrfs_extent_readonly(fs_info, disk_bytenr))
                                goto out_check;
                        if (btrfs_cross_ref_exist(root, ino,
                                                  found_key.offset -
                                                  extent_offset, disk_bytenr))
                                goto out_check;
                        disk_bytenr += extent_offset;
                        disk_bytenr += cur_offset - found_key.offset;
                        num_bytes = min(end + 1, extent_end) - cur_offset;
1311                         /*
1312                          * If there are pending snapshots for this root,
1313                          * fall back to the common COW path.
1314                          */
1315                         if (!nolock) {
1316                                 err = btrfs_start_write_no_snapshoting(root);
1317                                 if (!err)
1318                                         goto out_check;
1319                         }
1320                         /*
1321                          * Force COW if csums exist in the range.
1322                          * This ensures that the csums for a given extent
1323                          * are either valid or do not exist.
1324                          */
1325                         if (csum_exist_in_range(fs_info, disk_bytenr,
1326                                                 num_bytes))
1327                                 goto out_check;
1328                         if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
1329                                 goto out_check;
1330                         nocow = 1;
1331                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1332                         extent_end = found_key.offset +
1333                                 btrfs_file_extent_inline_len(leaf,
1334                                                      path->slots[0], fi);
1335                         extent_end = ALIGN(extent_end,
1336                                            fs_info->sectorsize);
1337                 } else {
1338                         BUG_ON(1);
1339                 }
1340 out_check:
1341                 if (extent_end <= start) {
1342                         path->slots[0]++;
1343                         if (!nolock && nocow)
1344                                 btrfs_end_write_no_snapshoting(root);
1345                         if (nocow)
1346                                 btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1347                         goto next_slot;
1348                 }
1349                 if (!nocow) {
1350                         if (cow_start == (u64)-1)
1351                                 cow_start = cur_offset;
1352                         cur_offset = extent_end;
1353                         if (cur_offset > end)
1354                                 break;
1355                         path->slots[0]++;
1356                         goto next_slot;
1357                 }
1358
1359                 btrfs_release_path(path);
1360                 if (cow_start != (u64)-1) {
1361                         ret = cow_file_range(inode, locked_page,
1362                                              cow_start, found_key.offset - 1,
1363                                              end, page_started, nr_written, 1,
1364                                              NULL);
1365                         if (ret) {
1366                                 if (!nolock && nocow)
1367                                         btrfs_end_write_no_snapshoting(root);
1368                                 if (nocow)
1369                                         btrfs_dec_nocow_writers(fs_info,
1370                                                                 disk_bytenr);
1371                                 goto error;
1372                         }
1373                         cow_start = (u64)-1;
1374                 }
1375
1376                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1377                         u64 orig_start = found_key.offset - extent_offset;
1378
1379                         em = create_io_em(inode, cur_offset, num_bytes,
1380                                           orig_start,
1381                                           disk_bytenr, /* block_start */
1382                                           num_bytes, /* block_len */
1383                                           disk_num_bytes, /* orig_block_len */
1384                                           ram_bytes, BTRFS_COMPRESS_NONE,
1385                                           BTRFS_ORDERED_PREALLOC);
1386                         if (IS_ERR(em)) {
1387                                 if (!nolock && nocow)
1388                                         btrfs_end_write_no_snapshoting(root);
1389                                 if (nocow)
1390                                         btrfs_dec_nocow_writers(fs_info,
1391                                                                 disk_bytenr);
1392                                 ret = PTR_ERR(em);
1393                                 goto error;
1394                         }
1395                         free_extent_map(em);
1396                 }
1397
1398                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1399                         type = BTRFS_ORDERED_PREALLOC;
1400                 } else {
1401                         type = BTRFS_ORDERED_NOCOW;
1402                 }
1403
1404                 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1405                                                num_bytes, num_bytes, type);
1406                 if (nocow)
1407                         btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1408                 BUG_ON(ret); /* -ENOMEM */
1409
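        /*
         * For inodes of the data relocation tree the data was written
         * elsewhere first, so clone the csums recorded for the original
         * extent to the new location; otherwise reads of the relocated
         * range would fail checksum verification.
         */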
1410                 if (root->root_key.objectid ==
1411                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
1412                         ret = btrfs_reloc_clone_csums(inode, cur_offset,
1413                                                       num_bytes);
1414                         if (ret) {
1415                                 if (!nolock && nocow)
1416                                         btrfs_end_write_no_snapshoting(root);
1417                                 goto error;
1418                         }
1419                 }
1420
1421                 extent_clear_unlock_delalloc(inode, cur_offset,
1422                                              cur_offset + num_bytes - 1, end,
1423                                              locked_page, EXTENT_LOCKED |
1424                                              EXTENT_DELALLOC |
1425                                              EXTENT_CLEAR_DATA_RESV,
1426                                              PAGE_UNLOCK | PAGE_SET_PRIVATE2);
1427
1428                 if (!nolock && nocow)
1429                         btrfs_end_write_no_snapshoting(root);
1430                 cur_offset = extent_end;
1431                 if (cur_offset > end)
1432                         break;
1433         }
1434         btrfs_release_path(path);
1435
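        /*
         * If the scan ended before @end with no COW range still open,
         * nothing in the remaining tail was NOCOW-able; queue the whole
         * tail for the ordinary COW fallback below.
         */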
1436         if (cur_offset <= end && cow_start == (u64)-1) {
1437                 cow_start = cur_offset;
1438                 cur_offset = end;
1439         }
1440
1441         if (cow_start != (u64)-1) {
1442                 ret = cow_file_range(inode, locked_page, cow_start, end, end,
1443                                      page_started, nr_written, 1, NULL);
1444                 if (ret)
1445                         goto error;
1446         }
1447
1448 error:
1449         if (ret && cur_offset < end)
1450                 extent_clear_unlock_delalloc(inode, cur_offset, end, end,
1451                                              locked_page, EXTENT_LOCKED |
1452                                              EXTENT_DELALLOC | EXTENT_DEFRAG |
1453                                              EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1454                                              PAGE_CLEAR_DIRTY |
1455                                              PAGE_SET_WRITEBACK |
1456                                              PAGE_END_WRITEBACK);
1457         btrfs_free_path(path);
1458         return ret;
1459 }
1460
1461 static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
1462 {
1463
1464         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
1465             !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
1466                 return 0;
1467
1468         /*
1469          * @defrag_bytes is only a hint, read with no spinlock held; if it
1470          * is non-zero, the file is being defragged.  Force COW if the
1471          * given extent needs to be defragged.
1472          */
1473         if (BTRFS_I(inode)->defrag_bytes &&
1474             test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1475                            EXTENT_DEFRAG, 0, NULL))
1476                 return 1;
1477
1478         return 0;
1479 }
1480
1481 /*
1482  * extent_io.c callback to do delayed allocation processing
1483  */
1484 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1485                               u64 start, u64 end, int *page_started,
1486                               unsigned long *nr_written)
1487 {
1488         int ret;
1489         int force_cow = need_force_cow(inode, start, end);
1490
1491         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
1492                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1493                                          page_started, 1, nr_written);
1494         } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
1495                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1496                                          page_started, 0, nr_written);
1497         } else if (!inode_need_compress(inode)) {
1498                 ret = cow_file_range(inode, locked_page, start, end, end,
1499                                       page_started, nr_written, 1, NULL);
1500         } else {
1501                 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1502                         &BTRFS_I(inode)->runtime_flags);
1503                 ret = cow_file_range_async(inode, locked_page, start, end,
1504                                            page_started, nr_written);
1505         }
1506         return ret;
1507 }
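/*
 * A summary of the dispatch above (illustrative only, mirroring the code):
 *
 *   NODATACOW inode, no forced COW -> run_delalloc_nocow(), force = 1
 *   PREALLOC inode,  no forced COW -> run_delalloc_nocow(), force = 0
 *   compression not worthwhile    -> cow_file_range()
 *   otherwise                     -> cow_file_range_async() for compression
 */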
1508
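/*
 * count_max_extents(), used by the two hooks below, divides a byte length
 * into the number of outstanding extents it must account for.  A minimal
 * sketch of the helper, assuming the definition from ctree.h with
 * BTRFS_MAX_EXTENT_SIZE as the per-extent cap:
 *
 *	static inline u32 count_max_extents(u64 size)
 *	{
 *		return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
 *			       BTRFS_MAX_EXTENT_SIZE);
 *	}
 */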
1509 static void btrfs_split_extent_hook(struct inode *inode,
1510                                     struct extent_state *orig, u64 split)
1511 {
1512         u64 size;
1513
1514         /* not delalloc, ignore it */
1515         if (!(orig->state & EXTENT_DELALLOC))
1516                 return;
1517
1518         size = orig->end - orig->start + 1;
1519         if (size > BTRFS_MAX_EXTENT_SIZE) {
1520                 u32 num_extents;
1521                 u64 new_size;
1522
1523                 /*
1524                  * See the explanation in btrfs_merge_extent_hook, the same
1525                  * applies here, just in reverse.
1526                  */
1527                 new_size = orig->end - split + 1;
1528                 num_extents = count_max_extents(new_size);
1529                 new_size = split - orig->start;
1530                 num_extents += count_max_extents(new_size);
1531                 if (count_max_extents(size) >= num_extents)
1532                         return;
1533         }
1534
1535         spin_lock(&BTRFS_I(inode)->lock);
1536         BTRFS_I(inode)->outstanding_extents++;
1537         spin_unlock(&BTRFS_I(inode)->lock);
1538 }
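/*
 * Worked example for the split accounting above, assuming a 128M
 * BTRFS_MAX_EXTENT_SIZE: a 256M+8K delalloc range accounts for 3
 * outstanding extents, but split into two 128M+4K halves each half
 * needs 2, so one extra outstanding extent is reserved.  A 256M range
 * split evenly needs 1 + 1 = 2, matching the 2 already accounted for,
 * and we return early.  Ranges at or below the max always gain one
 * outstanding extent when split.
 */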
1539
1540 /*
1541  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1542  * extents, i.e. new extents that are just merged onto old extents, such
1543  * as when we are doing sequential writes, so we can properly account for
1544  * the metadata space we'll need.
1545  */
1546 static void btrfs_merge_extent_hook(struct inode *inode,
1547                                     struct extent_state *new,
1548                                     struct extent_state *other)
1549 {
1550         u64 new_size, old_size;
1551         u32 num_extents;
1552
1553         /* not delalloc, ignore it */
1554         if (!(other->state & EXTENT_DELALLOC))
1555                 return;
1556
1557         if (new->start > other->start)
1558                 new_size = new->end - other->start + 1;
1559         else
1560                 new_size = other->end - new->start + 1;
1561
1562         /* we're not bigger than the max, unreserve the space and go */
1563         if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1564                 spin_lock(&BTRFS_I(inode)->lock);
1565                 BTRFS_I(inode)->outstanding_extents--;
1566                 spin_unlock(&BTRFS_I(inode)->lock);
1567                 return;
1568         }
1569
1570         /*
1571          * We have to add up either side to figure out how many extents were
1572          * accounted for before we merged into one big extent.  If the number of
1573          * extents we accounted for is <= the amount we need for the new range
1574          * then we can return, otherwise drop.  Think of it like this
1575          *
1576          * [ 4k][MAX_SIZE]
1577          *
1578          * So we've grown the extent by a MAX_SIZE extent, this would mean we
1579          * need 2 outstanding extents, on one side we have 1 and the other side
1580          * we have 1 so they are == and we can return.  But in this case
1581          *
1582          * [MAX_SIZE+4k][MAX_SIZE+4k]
1583          *
1584          * Each range on their own accounts for 2 extents, but merged together
1585          * they are only 3 extents worth of accounting, so we need to drop in
1586          * this case.
1587          */
1588         old_size = other->end - other->start + 1;
1589         num_extents = count_max_extents(old_size);
1590         old_size = new->end - new->start + 1;
1591         num_extents += count_max_extents(old_size);
1592         if (count_max_extents(new_size) >= num_extents)
1593                 return;
1594
1595         spin_lock(&BTRFS_I(inode)->lock);
1596         BTRFS_I(inode)->outstanding_extents--;
1597         spin_unlock(&BTRFS_I(inode)->lock);
1598 }
1599
1600 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1601                                       struct inode *inode)
1602 {
1603         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1604
1605         spin_lock(&root->delalloc_lock);
1606         if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1607                 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1608                               &root->delalloc_inodes);
1609                 set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1610                         &BTRFS_I(inode)->runtime_flags);
1611                 root->nr_delalloc_inodes++;
1612                 if (root->nr_delalloc_inodes == 1) {
1613                         spin_lock(&fs_info->delalloc_root_lock);
1614                         BUG_ON(!list_empty(&root->delalloc_root));
1615                         list_add_tail(&root->delalloc_root,
1616                                       &fs_info->delalloc_roots);
1617                         spin_unlock(&fs_info->delalloc_root_lock);
1618                 }
1619         }
1620         spin_unlock(&root->delalloc_lock);
1621 }
1622
1623 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1624                                      struct btrfs_inode *inode)
1625 {
1626         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1627
1628         spin_lock(&root->delalloc_lock);
1629         if (!list_empty(&inode->delalloc_inodes)) {
1630                 list_del_init(&inode->delalloc_inodes);
1631                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1632                           &inode->runtime_flags);
1633                 root->nr_delalloc_inodes--;
1634                 if (!root->nr_delalloc_inodes) {
1635                         spin_lock(&fs_info->delalloc_root_lock);
1636                         BUG_ON(list_empty(&root->delalloc_root));
1637                         list_del_init(&root->delalloc_root);
1638                         spin_unlock(&fs_info->delalloc_root_lock);
1639                 }
1640         }
1641         spin_unlock(&root->delalloc_lock);
1642 }
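/*
 * Lock nesting for the two list helpers above: the per-root
 * delalloc_lock is always taken before the fs-wide delalloc_root_lock,
 * and a root sits on fs_info->delalloc_roots exactly while its
 * nr_delalloc_inodes count is non-zero.
 */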
1643
1644 /*
1645  * extent_io.c set_bit_hook, used to track delayed allocation
1646  * bytes in this file, and to maintain the list of inodes that
1647  * have pending delalloc work to be done.
1648  */
1649 static void btrfs_set_bit_hook(struct inode *inode,
1650                                struct extent_state *state, unsigned *bits)
1651 {
1652
1653         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1654
1655         if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
1656                 WARN_ON(1);
1657         /*
1658          * The set_bit and clear_bit hooks normally require the
1659          * _irqsave/restore versions, but in this case we are only testing
1660          * for the DELALLOC bit, which is only set or cleared with irqs on.
1661          */
1662         if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1663                 struct btrfs_root *root = BTRFS_I(inode)->root;
1664                 u64 len = state->end + 1 - state->start;
1665                 bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));
1666
1667                 if (*bits & EXTENT_FIRST_DELALLOC) {
1668                         *bits &= ~EXTENT_FIRST_DELALLOC;
1669                 } else {
1670                         spin_lock(&BTRFS_I(inode)->lock);
1671                         BTRFS_I(inode)->outstanding_extents++;
1672                         spin_unlock(&BTRFS_I(inode)->lock);
1673                 }
1674
1675                 /* For sanity tests */
1676                 if (btrfs_is_testing(fs_info))
1677                         return;
1678
1679                 __percpu_counter_add(&fs_info->delalloc_bytes, len,
1680                                      fs_info->delalloc_batch);
1681                 spin_lock(&BTRFS_I(inode)->lock);
1682                 BTRFS_I(inode)->delalloc_bytes += len;
1683                 if (*bits & EXTENT_DEFRAG)
1684                         BTRFS_I(inode)->defrag_bytes += len;
1685                 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1686                                          &BTRFS_I(inode)->runtime_flags))
1687                         btrfs_add_delalloc_inodes(root, inode);
1688                 spin_unlock(&BTRFS_I(inode)->lock);
1689         }
1690 }
1691
1692 /*
1693  * extent_io.c clear_bit_hook, see set_bit_hook for why
1694  */
1695 static void btrfs_clear_bit_hook(struct btrfs_inode *inode,
1696                                  struct extent_state *state,
1697                                  unsigned *bits)
1698 {
1699         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1700         u64 len = state->end + 1 - state->start;
1701         u32 num_extents = count_max_extents(len);
1702
1703         spin_lock(&inode->lock);
1704         if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
1705                 inode->defrag_bytes -= len;
1706         spin_unlock(&inode->lock);
1707
1708         /*
1709          * The set_bit and clear_bit hooks normally require the
1710          * _irqsave/restore versions, but in this case we are only testing
1711          * for the DELALLOC bit, which is only set or cleared with irqs on.
1712          */
1713         if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1714                 struct btrfs_root *root = inode->root;
1715                 bool do_list = !btrfs_is_free_space_inode(inode);
1716
1717                 if (*bits & EXTENT_FIRST_DELALLOC) {
1718                         *bits &= ~EXTENT_FIRST_DELALLOC;
1719                 } else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
1720                         spin_lock(&inode->lock);
1721                         inode->outstanding_extents -= num_extents;
1722                         spin_unlock(&inode->lock);
1723                 }
1724
1725                 /*
1726                  * We don't reserve metadata space for space cache inodes,
1727                  * so we don't need to call btrfs_delalloc_release_metadata
1728                  * if there is an error.
1729                  */
1730                 if (*bits & EXTENT_DO_ACCOUNTING &&
1731                     root != fs_info->tree_root)
1732                         btrfs_delalloc_release_metadata(inode, len);
1733
1734                 /* For sanity tests. */
1735                 if (btrfs_is_testing(fs_info))
1736                         return;
1737
1738                 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1739                     && do_list && !(state->state & EXTENT_NORESERVE)
1740                     && (*bits & (EXTENT_DO_ACCOUNTING |
1741                     EXTENT_CLEAR_DATA_RESV)))
1742                         btrfs_free_reserved_data_space_noquota(
1743                                         &inode->vfs_inode,
1744                                         state->start, len);
1745
1746                 __percpu_counter_add(&fs_info->delalloc_bytes, -len,
1747                                      fs_info->delalloc_batch);
1748                 spin_lock(&inode->lock);
1749                 inode->delalloc_bytes -= len;
1750                 if (do_list && inode->delalloc_bytes == 0 &&
1751                     test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1752                                         &inode->runtime_flags))
1753                         btrfs_del_delalloc_inode(root, inode);
1754                 spin_unlock(&inode->lock);
1755         }
1756 }
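/*
 * A note on EXTENT_FIRST_DELALLOC, consumed by both hooks above: the
 * first set/clear for a reservation has its outstanding extent counted
 * by the reservation code itself, so the hooks strip the bit instead
 * of adjusting outstanding_extents a second time.
 */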
1757
1758 /*
1759  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1760  * we don't create bios that span stripes or chunks
1761  *
1762  * return 1 if page cannot be merged to bio
1763  * return 0 if page can be merged to bio
1764  * return error otherwise
1765  */
1766 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1767                          size_t size, struct bio *bio,
1768                          unsigned long bio_flags)
1769 {
1770         struct inode *inode = page->mapping->host;
1771         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1772         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1773         u64 length = 0;
1774         u64 map_length;
1775         int ret;
1776
1777         if (bio_flags & EXTENT_BIO_COMPRESSED)
1778                 return 0;
1779
1780         length = bio->bi_iter.bi_size;
1781         map_length = length;
1782         ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
1783                               NULL, 0);
1784         if (ret < 0)
1785                 return ret;
1786         if (map_length < length + size)
1787                 return 1;
1788         return 0;
1789 }
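/*
 * Illustrative caller pattern for the hook above (a sketch, not code from
 * this file): the extent_io.c bio builder asks whether appending @size
 * more bytes would make the bio cross a stripe or chunk boundary, and
 * submits the bio it has built so far whenever the hook returns 1.
 */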
1790
1791 /*
1792  * in order to insert checksums into the metadata in large chunks,
1793  * we wait until bio submission time.   All the pages in the bio are
1794  * checksummed and sums are attached onto the ordered extent record.
1795  *
1796  * At IO completion time the csums attached on the ordered extent record
1797  * are inserted into the btree
1798  */
1799 static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
1800                                     int mirror_num, unsigned long bio_flags,
1801                                     u64 bio_offset)
1802 {
1803         int ret = 0;
1804
1805         ret = btrfs_csum_one_bio(inode, bio, 0, 0);
1806         BUG_ON(ret); /* -ENOMEM */
1807         return 0;
1808 }
1809
1810 /*
1811  * The "done" half of the async write checksumming kicked off above:
1812  * by the time this is called, __btrfs_submit_bio_start has already
1813  * checksummed the pages and attached the sums to the ordered extent
1814  * record, so all that is left is to map the bio to the underlying
1815  * device(s) and submit it, completing the bio with the error code if
1816  * the mapping fails.
1817  */
1818 static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio,
1819                           int mirror_num, unsigned long bio_flags,
1820                           u64 bio_offset)
1821 {
1822         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1823         int ret;
1824
1825         ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
1826         if (ret) {
1827                 bio->bi_error = ret;
1828                 bio_endio(bio);
1829         }
1830         return ret;
1831 }
1832
1833 /*
1834  * extent_io.c submission hook. This does the right thing for csum calculation
1835  * on write, or reading the csums from the tree before a read
1836  */
1837 static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
1838                           int mirror_num, unsigned long bio_flags,
1839                           u64 bio_offset)
1840 {
1841         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1842         struct btrfs_root *root = BTRFS_I(inode)->root;
1843         enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
1844         int ret = 0;
1845         int skip_sum;
1846         int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
1847
1848         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1849
1850         if (btrfs_is_free_space_inode(BTRFS_I(inode)))
1851                 metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
1852
1853         if (bio_op(bio) != REQ_OP_WRITE) {
1854                 ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
1855                 if (ret)
1856                         goto out;
1857
1858                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1859                         ret = btrfs_submit_compressed_read(inode, bio,
1860                                                            mirror_num,
1861                                                            bio_flags);
1862                         goto out;
1863                 } else if (!skip_sum) {
1864                         ret = btrfs_lookup_bio_sums(inode, bio, NULL);
1865                         if (ret)
1866                                 goto out;
1867                 }
1868                 goto mapit;
1869         } else if (async && !skip_sum) {
1870                 /* csum items have already been cloned */
1871                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1872                         goto mapit;
1873                 /* we're doing a write, do the async checksumming */
1874                 ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num,
1875                                           bio_flags, bio_offset,
1876                                           __btrfs_submit_bio_start,
1877                                           __btrfs_submit_bio_done);
1878                 goto out;
1879         } else if (!skip_sum) {
1880                 ret = btrfs_csum_one_bio(inode, bio, 0, 0);
1881                 if (ret)
1882                         goto out;
1883         }
1884
1885 mapit:
1886         ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
1887
1888 out:
1889         if (ret < 0) {
1890                 bio->bi_error = ret;
1891                 bio_endio(bio);
1892         }
1893         return ret;
1894 }
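/*
 * Summary of the submission hook above: reads are registered with the
 * end-io workqueue and either take the compressed-read path or have
 * their csums looked up before being mapped; async writes offload
 * checksumming to __btrfs_submit_bio_start via the worker queue, while
 * synchronous writes csum inline before btrfs_map_bio() submits to the
 * underlying device(s).
 */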
1895
1896 /*
1897  * given a list of ordered sums, record them in the inode.  This happens
1898  * at IO completion time based on sums calculated at bio submission time.
1899  */
1900 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1901                              struct inode *inode, struct list_head *list)
1902 {
1903         struct btrfs_ordered_sum *sum;
1904
1905         list_for_each_entry(sum, list, list) {
1906                 trans->adding_csums = 1;
1907                 btrfs_csum_file_blocks(trans,
1908                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
1909                 trans->adding_csums = 0;
1910         }
1911         return 0;
1912 }
1913
1914 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1915                               struct extent_state **cached_state, int dedupe)
1916 {
1917         WARN_ON((end & (PAGE_SIZE - 1)) == 0);
1918         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1919                                    cached_state);
1920 }
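/*
 * The WARN_ON above catches callers passing an exclusive end: ranges in
 * the io_tree are inclusive, so a page-aligned range ends on a
 * (PAGE_SIZE - 1) boundary, never on a page-size multiple.
 */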
1921
1922 /* see btrfs_writepage_start_hook for details on why this is required */
1923 struct btrfs_writepage_fixup {
1924         struct page *page;
1925         struct btrfs_work work;
1926 };
1927
1928 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1929 {
1930         struct btrfs_writepage_fixup *fixup;
1931         struct btrfs_ordered_extent *ordered;
1932         struct extent_state *cached_state = NULL;
1933         struct page *page;
1934         struct inode *inode;
1935         u64 page_start;
1936         u64 page_end;
1937         int ret;
1938
1939         fixup = container_of(work, struct btrfs_writepage_fixup, work);
1940         page = fixup->page;
1941 again:
1942         lock_page(page);
1943         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1944                 ClearPageChecked(page);
1945                 goto out_page;
1946         }
1947
1948         inode = page->mapping->host;
1949         page_start = page_offset(page);
1950         page_end = page_offset(page) + PAGE_SIZE - 1;
1951
1952         lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
1953                          &cached_state);
1954
1955         /* already ordered? We're done */
1956         if (PagePrivate2(page))
1957                 goto out;
1958
1959         ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
1960                                         PAGE_SIZE);
1961         if (ordered) {
1962                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1963                                      page_end, &cached_state, GFP_NOFS);
1964                 unlock_page(page);
1965                 btrfs_start_ordered_extent(inode, ordered, 1);
1966                 btrfs_put_ordered_extent(ordered);
1967                 goto again;
1968         }
1969
1970         ret = btrfs_delalloc_reserve_space(inode, page_start,
1971                                            PAGE_SIZE);
1972         if (ret) {
1973                 mapping_set_error(page->mapping, ret);
1974                 end_extent_writepage(page, ret, page_start, page_end);
1975                 ClearPageChecked(page);
1976                 goto out;
1977         }
1978
1979         btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state,
1980                                   0);
1981         ClearPageChecked(page);
1982         set_page_dirty(page);
1983 out:
1984         unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1985                              &cached_state, GFP_NOFS);
1986 out_page:
1987         unlock_page(page);
1988         put_page(page);
1989         kfree(fixup);
1990 }
1991
1992 /*
1993  * There are a few paths in the higher layers of the kernel that directly
1994  * set the page dirty bit without asking the filesystem if it is a
1995  * good idea.  This causes problems because we want to make sure COW
1996  * properly happens and the data=ordered rules are followed.
1997  *
1998  * In our case any range that doesn't have the ORDERED bit set
1999  * hasn't been properly set up for IO.  We kick off an async process
2000  * to fix it up.  The async helper will wait for ordered extents, set
2001  * the delalloc bit and make it safe to write the page.
2002  */
2003 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
2004 {
2005         struct inode *inode = page->mapping->host;
2006         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2007         struct btrfs_writepage_fixup *fixup;
2008
2009         /* this page is properly in the ordered list */
2010         if (TestClearPagePrivate2(page))
2011                 return 0;
2012
2013         if (PageChecked(page))
2014                 return -EAGAIN;
2015
2016         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2017         if (!fixup)
2018                 return -EAGAIN;
2019
2020         SetPageChecked(page);
2021         get_page(page);
2022         btrfs_init_work(&fixup->work, btrfs_fixup_helper,
2023                         btrfs_writepage_fixup_worker, NULL, NULL);
2024         fixup->page = page;
2025         btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2026         return -EBUSY;
2027 }
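/*
 * Return-value contract for the hook above, as the writepage path treats
 * it: 0 means an ordered extent already covers the page and writeback can
 * proceed; -EAGAIN asks the caller to redirty and retry later; -EBUSY
 * means a fixup worker now owns the page and this writeback is skipped.
 */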
2028
2029 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2030                                        struct inode *inode, u64 file_pos,
2031                                        u64 disk_bytenr, u64 disk_num_bytes,
2032                                        u64 num_bytes, u64 ram_bytes,
2033                                        u8 compression, u8 encryption,
2034                                        u16 other_encoding, int extent_type)
2035 {
2036         struct btrfs_root *root = BTRFS_I(inode)->root;
2037         struct btrfs_file_extent_item *fi;
2038         struct btrfs_path *path;
2039         struct extent_buffer *leaf;
2040         struct btrfs_key ins;
2041         int extent_inserted = 0;
2042         int ret;
2043
2044         path = btrfs_alloc_path();
2045         if (!path)
2046                 return -ENOMEM;
2047
2048         /*
2049          * we may be replacing one extent in the tree with another.
2050          * The new extent is pinned in the extent map, and we don't want
2051          * to drop it from the cache until it is completely in the btree.
2052          *
2053          * So, tell btrfs_drop_extents to leave this extent in the cache.
2054          * The caller is expected to unpin it and allow it to be merged
2055          * with the others.
2056          */
2057         ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
2058                                    file_pos + num_bytes, NULL, 0,
2059                                    1, sizeof(*fi), &extent_inserted);
2060         if (ret)
2061                 goto out;
2062
2063         if (!extent_inserted) {
2064                 ins.objectid = btrfs_ino(BTRFS_I(inode));
2065                 ins.offset = file_pos;
2066                 ins.type = BTRFS_EXTENT_DATA_KEY;
2067
2068                 path->leave_spinning = 1;
2069                 ret = btrfs_insert_empty_item(trans, root, path, &ins,
2070                                               sizeof(*fi));
2071                 if (ret)
2072                         goto out;
2073         }
2074         leaf = path->nodes[0];
2075         fi = btrfs_item_ptr(leaf, path->slots[0],
2076                             struct btrfs_file_extent_item);
2077         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2078         btrfs_set_file_extent_type(leaf, fi, extent_type);
2079         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
2080         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
2081         btrfs_set_file_extent_offset(leaf, fi, 0);
2082         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2083         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
2084         btrfs_set_file_extent_compression(leaf, fi, compression);
2085         btrfs_set_file_extent_encryption(leaf, fi, encryption);
2086         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
2087
2088         btrfs_mark_buffer_dirty(leaf);
2089         btrfs_release_path(path);
2090
2091         inode_add_bytes(inode, num_bytes);
2092
2093         ins.objectid = disk_bytenr;
2094         ins.offset = disk_num_bytes;
2095         ins.type = BTRFS_EXTENT_ITEM_KEY;
2096         ret = btrfs_alloc_reserved_file_extent(trans, root->root_key.objectid,
2097                         btrfs_ino(BTRFS_I(inode)), file_pos, ram_bytes, &ins);
2098         /*
2099          * Release the reserved range from the inode dirty range map, as
2100          * it has already been moved into the delayed_ref_head.
2101          */
2102         btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
2103 out:
2104         btrfs_free_path(path);
2105
2106         return ret;
2107 }
2108
2109 /* snapshot-aware defrag */
2110 struct sa_defrag_extent_backref {
2111         struct rb_node node;
2112         struct old_sa_defrag_extent *old;
2113         u64 root_id;
2114         u64 inum;
2115         u64 file_pos;
2116         u64 extent_offset;
2117         u64 num_bytes;
2118         u64 generation;
2119 };
2120
2121 struct old_sa_defrag_extent {
2122         struct list_head list;
2123         struct new_sa_defrag_extent *new;
2124
2125         u64 extent_offset;
2126         u64 bytenr;
2127         u64 offset;
2128         u64 len;
2129         int count;
2130 };
2131
2132 struct new_sa_defrag_extent {
2133         struct rb_root root;
2134         struct list_head head;
2135         struct btrfs_path *path;
2136         struct inode *inode;
2137         u64 file_pos;
2138         u64 len;
2139         u64 bytenr;
2140         u64 disk_len;
2141         u8 compress_type;
2142 };
2143
2144 static int backref_comp(struct sa_defrag_extent_backref *b1,
2145                         struct sa_defrag_extent_backref *b2)
2146 {
2147         if (b1->root_id < b2->root_id)
2148                 return -1;
2149         else if (b1->root_id > b2->root_id)
2150                 return 1;
2151
2152         if (b1->inum < b2->inum)
2153                 return -1;
2154         else if (b1->inum > b2->inum)
2155                 return 1;
2156
2157         if (b1->file_pos < b2->file_pos)
2158                 return -1;
2159         else if (b1->file_pos > b2->file_pos)
2160                 return 1;
2161
2162         /*
2163          * [------------------------------] ===> (a range of space)
2164          *     |<--->|   |<---->| =============> (fs/file tree A)
2165          * |<---------------------------->| ===> (fs/file tree B)
2166          *
2167          * A range of space can refer to two file extents in one tree while
2168          * referring to only one file extent in another tree.
2169          *
2170          * So we may process a disk offset more than once (two extents in A)
2171          * and land on the same extent (one extent in B), then insert two
2172          * identical backrefs (both referring to the extent in B).
2173          */
2174         return 0;
2175 }
2176
2177 static void backref_insert(struct rb_root *root,
2178                            struct sa_defrag_extent_backref *backref)
2179 {
2180         struct rb_node **p = &root->rb_node;
2181         struct rb_node *parent = NULL;
2182         struct sa_defrag_extent_backref *entry;
2183         int ret;
2184
2185         while (*p) {
2186                 parent = *p;
2187                 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2188
2189                 ret = backref_comp(backref, entry);
2190                 if (ret < 0)
2191                         p = &(*p)->rb_left;
2192                 else
2193                         p = &(*p)->rb_right;
2194         }
2195
2196         rb_link_node(&backref->node, parent, p);
2197         rb_insert_color(&backref->node, root);
2198 }
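/*
 * Equal keys (backref_comp() == 0) take the right branch above, so the
 * duplicate backrefs described in backref_comp() are kept in the tree
 * rather than dropped, and are dealt with when the tree is walked.
 */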
2199
2200 /*
2201  * Note the backref might have changed, and in this case we just return 0.
2202  */
2203 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2204                                        void *ctx)
2205 {
2206         struct btrfs_file_extent_item *extent;
2207         struct old_sa_defrag_extent *old = ctx;
2208         struct new_sa_defrag_extent *new = old->new;
2209         struct btrfs_path *path = new->path;
2210         struct btrfs_key key;
2211         struct btrfs_root *root;
2212         struct sa_defrag_extent_backref *backref;
2213         struct extent_buffer *leaf;
2214         struct inode *inode = new->inode;
2215         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2216         int slot;
2217         int ret;
2218         u64 extent_offset;
2219         u64 num_bytes;
2220
2221         if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2222             inum == btrfs_ino(BTRFS_I(inode)))
2223                 return 0;
2224
2225         key.objectid = root_id;
2226         key.type = BTRFS_ROOT_ITEM_KEY;
2227         key.offset = (u64)-1;
2228
2229         root = btrfs_read_fs_root_no_name(fs_info, &key);
2230         if (IS_ERR(root)) {
2231                 if (PTR_ERR(root) == -ENOENT)
2232                         return 0;
2233                 WARN_ON(1);
2234                 btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu",
2235                          inum, offset, root_id);
2236                 return PTR_ERR(root);
2237         }
2238
2239         key.objectid = inum;
2240         key.type = BTRFS_EXTENT_DATA_KEY;
2241         if (offset > (u64)-1 << 32)
2242                 key.offset = 0;
2243         else
2244                 key.offset = offset;
2245
2246         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2247         if (WARN_ON(ret < 0))
2248                 return ret;
2249         ret = 0;
2250
2251         while (1) {
2252                 cond_resched();
2253
2254                 leaf = path->nodes[0];
2255                 slot = path->slots[0];
2256
2257                 if (slot >= btrfs_header_nritems(leaf)) {
2258                         ret = btrfs_next_leaf(root, path);
2259                         if (ret < 0) {
2260                                 goto out;
2261                         } else if (ret > 0) {
2262                                 ret = 0;
2263                                 goto out;
2264                         }
2265                         continue;
2266                 }
2267
2268                 path->slots[0]++;
2269
2270                 btrfs_item_key_to_cpu(leaf, &key, slot);
2271
2272                 if (key.objectid > inum)
2273                         goto out;
2274
2275                 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2276                         continue;
2277
2278                 extent = btrfs_item_ptr(leaf, slot,
2279                                         struct btrfs_file_extent_item);
2280
2281                 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2282                         continue;
2283
2284                 /*
2285                  * 'offset' refers to the exact key.offset,
2286          * NOT the 'offset' field in btrfs_extent_data_ref, i.e.
2287                  * (key.offset - extent_offset).
2288                  */
2289                 if (key.offset != offset)
2290                         continue;
2291
2292                 extent_offset = btrfs_file_extent_offset(leaf, extent);
2293                 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2294
2295                 if (extent_offset >= old->extent_offset + old->offset +
2296                     old->len || extent_offset + num_bytes <=
2297                     old->extent_offset + old->offset)
2298                         continue;
2299                 break;
2300         }
2301
2302         backref = kmalloc(sizeof(*backref), GFP_NOFS);
2303         if (!backref) {
2304                 ret = -ENOMEM;  /* allocation failure, not a missing backref */
2305                 goto out;
2306         }
2307
2308         backref->root_id = root_id;
2309         backref->inum = inum;
2310         backref->file_pos = offset;
2311         backref->num_bytes = num_bytes;
2312         backref->extent_offset = extent_offset;
2313         backref->generation = btrfs_file_extent_generation(leaf, extent);
2314         backref->old = old;
2315         backref_insert(&new->root, backref);
2316         old->count++;
2317 out:
2318         btrfs_release_path(path);
2319         WARN_ON(ret);
2320         return ret;
2321 }
2322
2323 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2324                                    struct new_sa_defrag_extent *new)
2325 {
2326         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2327         struct old_sa_defrag_extent *old, *tmp;
2328         int ret;
2329
2330         new->path = path;
2331
2332         list_for_each_entry_safe(old, tmp, &new->head, list) {
2333                 ret = iterate_inodes_from_logical(old->bytenr +
2334                                                   old->extent_offset, fs_info,
2335                                                   path, record_one_backref,
2336                                                   old);
2337                 if (ret < 0 && ret != -ENOENT)
2338                         return false;
2339
2340                 /* no backref to be processed for this extent */
2341                 if (!old->count) {
2342                         list_del(&old->list);
2343                         kfree(old);
2344                 }
2345         }
2346
2347         if (list_empty(&new->head))
2348                 return false;
2349
2350         return true;
2351 }
2352
2353 static int relink_is_mergable(struct extent_buffer *leaf,
2354                               struct btrfs_file_extent_item *fi,
2355                               struct new_sa_defrag_extent *new)
2356 {
2357         if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2358                 return 0;
2359
2360         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2361                 return 0;
2362
2363         if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2364                 return 0;
2365
2366         if (btrfs_file_extent_encryption(leaf, fi) ||
2367             btrfs_file_extent_other_encoding(leaf, fi))
2368                 return 0;
2369
2370         return 1;
2371 }
2372
2373 /*
2374  * Note the backref might have changed, and in this case we just return 0.
2375  */
2376 static noinline int relink_extent_backref(struct btrfs_path *path,
2377                                  struct sa_defrag_extent_backref *prev,
2378                                  struct sa_defrag_extent_backref *backref)
2379 {
2380         struct btrfs_file_extent_item *extent;
2381         struct btrfs_file_extent_item *item;
2382         struct btrfs_ordered_extent *ordered;
2383         struct btrfs_trans_handle *trans;
2384         struct btrfs_root *root;
2385         struct btrfs_key key;
2386         struct extent_buffer *leaf;
2387         struct old_sa_defrag_extent *old = backref->old;
2388         struct new_sa_defrag_extent *new = old->new;
2389         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2390         struct inode *inode;
2391         struct extent_state *cached = NULL;
2392         int ret = 0;
2393         u64 start;
2394         u64 len;
2395         u64 lock_start;
2396         u64 lock_end;
2397         bool merge = false;
2398         int index;
2399
2400         if (prev && prev->root_id == backref->root_id &&
2401             prev->inum == backref->inum &&
2402             prev->file_pos + prev->num_bytes == backref->file_pos)
2403                 merge = true;
2404
2405         /* step 1: get root */
2406         key.objectid = backref->root_id;
2407         key.type = BTRFS_ROOT_ITEM_KEY;
2408         key.offset = (u64)-1;
2409
2410         index = srcu_read_lock(&fs_info->subvol_srcu);
2411
2412         root = btrfs_read_fs_root_no_name(fs_info, &key);
2413         if (IS_ERR(root)) {
2414                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2415                 if (PTR_ERR(root) == -ENOENT)
2416                         return 0;
2417                 return PTR_ERR(root);
2418         }
2419
2420         if (btrfs_root_readonly(root)) {
2421                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2422                 return 0;
2423         }
2424
2425         /* step 2: get inode */
2426         key.objectid = backref->inum;
2427         key.type = BTRFS_INODE_ITEM_KEY;
2428         key.offset = 0;
2429
2430         inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2431         if (IS_ERR(inode)) {
2432                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2433                 return 0;
2434         }
2435
2436         srcu_read_unlock(&fs_info->subvol_srcu, index);
2437
2438         /* step 3: relink backref */
2439         lock_start = backref->file_pos;
2440         lock_end = backref->file_pos + backref->num_bytes - 1;
2441         lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2442                          &cached);
2443
2444         ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2445         if (ordered) {
2446                 btrfs_put_ordered_extent(ordered);
2447                 goto out_unlock;
2448         }
2449
2450         trans = btrfs_join_transaction(root);
2451         if (IS_ERR(trans)) {
2452                 ret = PTR_ERR(trans);
2453                 goto out_unlock;
2454         }
2455
2456         key.objectid = backref->inum;
2457         key.type = BTRFS_EXTENT_DATA_KEY;
2458         key.offset = backref->file_pos;
2459
2460         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2461         if (ret < 0) {
2462                 goto out_free_path;
2463         } else if (ret > 0) {
2464                 ret = 0;
2465                 goto out_free_path;
2466         }
2467
2468         extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2469                                 struct btrfs_file_extent_item);
2470
2471         if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2472             backref->generation)
2473                 goto out_free_path;
2474
2475         btrfs_release_path(path);
2476
2477         start = backref->file_pos;
2478         if (backref->extent_offset < old->extent_offset + old->offset)
2479                 start += old->extent_offset + old->offset -
2480                          backref->extent_offset;
2481
2482         len = min(backref->extent_offset + backref->num_bytes,
2483                   old->extent_offset + old->offset + old->len);
2484         len -= max(backref->extent_offset, old->extent_offset + old->offset);
2485
2486         ret = btrfs_drop_extents(trans, root, inode, start,
2487                                  start + len, 1);
2488         if (ret)
2489                 goto out_free_path;
2490 again:
2491         key.objectid = btrfs_ino(BTRFS_I(inode));
2492         key.type = BTRFS_EXTENT_DATA_KEY;
2493         key.offset = start;
2494
2495         path->leave_spinning = 1;
2496         if (merge) {
2497                 struct btrfs_file_extent_item *fi;
2498                 u64 extent_len;
2499                 struct btrfs_key found_key;
2500
2501                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2502                 if (ret < 0)
2503                         goto out_free_path;
2504
2505                 path->slots[0]--;
2506                 leaf = path->nodes[0];
2507                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2508
2509                 fi = btrfs_item_ptr(leaf, path->slots[0],
2510                                     struct btrfs_file_extent_item);
2511                 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2512
2513                 if (extent_len + found_key.offset == start &&
2514                     relink_is_mergable(leaf, fi, new)) {
2515                         btrfs_set_file_extent_num_bytes(leaf, fi,
2516                                                         extent_len + len);
2517                         btrfs_mark_buffer_dirty(leaf);
2518                         inode_add_bytes(inode, len);
2519
2520                         ret = 1;
2521                         goto out_free_path;
2522                 } else {
2523                         merge = false;
2524                         btrfs_release_path(path);
2525                         goto again;
2526                 }
2527         }
2528
2529         ret = btrfs_insert_empty_item(trans, root, path, &key,
2530                                         sizeof(*extent));
2531         if (ret) {
2532                 btrfs_abort_transaction(trans, ret);
2533                 goto out_free_path;
2534         }
2535
2536         leaf = path->nodes[0];
2537         item = btrfs_item_ptr(leaf, path->slots[0],
2538                                 struct btrfs_file_extent_item);
2539         btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2540         btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2541         btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2542         btrfs_set_file_extent_num_bytes(leaf, item, len);
2543         btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2544         btrfs_set_file_extent_generation(leaf, item, trans->transid);
2545         btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2546         btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2547         btrfs_set_file_extent_encryption(leaf, item, 0);
2548         btrfs_set_file_extent_other_encoding(leaf, item, 0);
2549
2550         btrfs_mark_buffer_dirty(leaf);
2551         inode_add_bytes(inode, len);
2552         btrfs_release_path(path);
2553
2554         ret = btrfs_inc_extent_ref(trans, fs_info, new->bytenr,
2555                         new->disk_len, 0,
2556                         backref->root_id, backref->inum,
2557                         new->file_pos); /* start - extent_offset */
2558         if (ret) {
2559                 btrfs_abort_transaction(trans, ret);
2560                 goto out_free_path;
2561         }
2562
2563         ret = 1;
2564 out_free_path:
2565         btrfs_release_path(path);
2566         path->leave_spinning = 0;
2567         btrfs_end_transaction(trans);
2568 out_unlock:
2569         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2570                              &cached, GFP_NOFS);
2571         iput(inode);
2572         return ret;
2573 }
2574
2575 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2576 {
2577         struct old_sa_defrag_extent *old, *tmp;
2578
2579         if (!new)
2580                 return;
2581
2582         list_for_each_entry_safe(old, tmp, &new->head, list) {
2583                 kfree(old);
2584         }
2585         kfree(new);
2586 }
2587
2588 static void relink_file_extents(struct new_sa_defrag_extent *new)
2589 {
2590         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2591         struct btrfs_path *path;
2592         struct sa_defrag_extent_backref *backref;
2593         struct sa_defrag_extent_backref *prev = NULL;
2594         struct inode *inode;
2595         struct btrfs_root *root;
2596         struct rb_node *node;
2597         int ret;
2598
2599         inode = new->inode;
2600         root = BTRFS_I(inode)->root;
2601
2602         path = btrfs_alloc_path();
2603         if (!path)
2604                 return;
2605
2606         if (!record_extent_backrefs(path, new)) {
2607                 btrfs_free_path(path);
2608                 goto out;
2609         }
2610         btrfs_release_path(path);
2611
2612         while (1) {
2613                 node = rb_first(&new->root);
2614                 if (!node)
2615                         break;
2616                 rb_erase(node, &new->root);
2617
2618                 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2619
2620                 ret = relink_extent_backref(path, prev, backref);
2621                 WARN_ON(ret < 0);
2622
2623                 kfree(prev);
2624
2625                 if (ret == 1)
2626                         prev = backref;
2627                 else
2628                         prev = NULL;
2629                 cond_resched();
2630         }
2631         kfree(prev);
2632
2633         btrfs_free_path(path);
2634 out:
2635         free_sa_defrag_extent(new);
2636
2637         atomic_dec(&fs_info->defrag_running);
2638         wake_up(&fs_info->transaction_wait);
2639 }
2640
2641 static struct new_sa_defrag_extent *
2642 record_old_file_extents(struct inode *inode,
2643                         struct btrfs_ordered_extent *ordered)
2644 {
2645         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2646         struct btrfs_root *root = BTRFS_I(inode)->root;
2647         struct btrfs_path *path;
2648         struct btrfs_key key;
2649         struct old_sa_defrag_extent *old;
2650         struct new_sa_defrag_extent *new;
2651         int ret;
2652
2653         new = kmalloc(sizeof(*new), GFP_NOFS);
2654         if (!new)
2655                 return NULL;
2656
2657         new->inode = inode;
2658         new->file_pos = ordered->file_offset;
2659         new->len = ordered->len;
2660         new->bytenr = ordered->start;
2661         new->disk_len = ordered->disk_len;
2662         new->compress_type = ordered->compress_type;
2663         new->root = RB_ROOT;
2664         INIT_LIST_HEAD(&new->head);
2665
2666         path = btrfs_alloc_path();
2667         if (!path)
2668                 goto out_kfree;
2669
2670         key.objectid = btrfs_ino(BTRFS_I(inode));
2671         key.type = BTRFS_EXTENT_DATA_KEY;
2672         key.offset = new->file_pos;
2673
2674         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2675         if (ret < 0)
2676                 goto out_free_path;
2677         if (ret > 0 && path->slots[0] > 0)
2678                 path->slots[0]--;
2679
2680         /* find out all the old extents for the file range */
2681         while (1) {
2682                 struct btrfs_file_extent_item *extent;
2683                 struct extent_buffer *l;
2684                 int slot;
2685                 u64 num_bytes;
2686                 u64 offset;
2687                 u64 end;
2688                 u64 disk_bytenr;
2689                 u64 extent_offset;
2690
2691                 l = path->nodes[0];
2692                 slot = path->slots[0];
2693
2694                 if (slot >= btrfs_header_nritems(l)) {
2695                         ret = btrfs_next_leaf(root, path);
2696                         if (ret < 0)
2697                                 goto out_free_path;
2698                         else if (ret > 0)
2699                                 break;
2700                         continue;
2701                 }
2702
2703                 btrfs_item_key_to_cpu(l, &key, slot);
2704
2705                 if (key.objectid != btrfs_ino(BTRFS_I(inode)))
2706                         break;
2707                 if (key.type != BTRFS_EXTENT_DATA_KEY)
2708                         break;
2709                 if (key.offset >= new->file_pos + new->len)
2710                         break;
2711
2712                 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2713
2714                 num_bytes = btrfs_file_extent_num_bytes(l, extent);
2715                 if (key.offset + num_bytes < new->file_pos)
2716                         goto next;
2717
2718                 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2719                 if (!disk_bytenr)
2720                         goto next;
2721
2722                 extent_offset = btrfs_file_extent_offset(l, extent);
2723
2724                 old = kmalloc(sizeof(*old), GFP_NOFS);
2725                 if (!old)
2726                         goto out_free_path;
2727
2728                 offset = max(new->file_pos, key.offset);
2729                 end = min(new->file_pos + new->len, key.offset + num_bytes);
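                /*
                 * e.g. a new range [100, 200) overlapping an old extent
                 * item at key.offset 150 that covers [150, 300) gives
                 * offset = 150 and end = 200, so old->offset = 0 and
                 * old->len = 50 below.
                 */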
2730
2731                 old->bytenr = disk_bytenr;
2732                 old->extent_offset = extent_offset;
2733                 old->offset = offset - key.offset;
2734                 old->len = end - offset;
2735                 old->new = new;
2736                 old->count = 0;
2737                 list_add_tail(&old->list, &new->head);
2738 next:
2739                 path->slots[0]++;
2740                 cond_resched();
2741         }
2742
2743         btrfs_free_path(path);
2744         atomic_inc(&fs_info->defrag_running);
2745
2746         return new;
2747
2748 out_free_path:
2749         btrfs_free_path(path);
2750 out_kfree:
2751         free_sa_defrag_extent(new);
2752         return NULL;
2753 }
2754
2755 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
2756                                          u64 start, u64 len)
2757 {
2758         struct btrfs_block_group_cache *cache;
2759
2760         cache = btrfs_lookup_block_group(fs_info, start);
2761         ASSERT(cache);
2762
2763         spin_lock(&cache->lock);
2764         cache->delalloc_bytes -= len;
2765         spin_unlock(&cache->lock);
2766
2767         btrfs_put_block_group(cache);
2768 }
2769
2770 /* as ordered data IO finishes, this gets called so we can finish
2771  * an ordered extent once the range of bytes in the file it covers
2772  * has been fully written.
2773  */
2774 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2775 {
2776         struct inode *inode = ordered_extent->inode;
2777         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2778         struct btrfs_root *root = BTRFS_I(inode)->root;
2779         struct btrfs_trans_handle *trans = NULL;
2780         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2781         struct extent_state *cached_state = NULL;
2782         struct new_sa_defrag_extent *new = NULL;
2783         int compress_type = 0;
2784         int ret = 0;
2785         u64 logical_len = ordered_extent->len;
2786         bool nolock;
2787         bool truncated = false;
2788
2789         nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
2790
2791         if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2792                 ret = -EIO;
2793                 goto out;
2794         }
2795
2796         btrfs_free_io_failure_record(BTRFS_I(inode),
2797                         ordered_extent->file_offset,
2798                         ordered_extent->file_offset +
2799                         ordered_extent->len - 1);
2800
2801         if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2802                 truncated = true;
2803                 logical_len = ordered_extent->truncated_len;
2804                 /* Truncated the entire extent, don't bother adding */
2805                 if (!logical_len)
2806                         goto out;
2807         }
2808
2809         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2810                 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2811
2812                 /*
2813                  * For the mwrite (mmap + memset to write) case, we still
2814                  * reserve space for the NOCOW range.
2815                  * Since NOCOW won't create a new delayed ref, just free the space.
2816                  */
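                /*
                 * An illustrative userspace sketch of the "mwrite" pattern
                 * referred to above (an assumed example, not code from this
                 * file): the dirtying write arrives via a shared mapping
                 * rather than write(2).
                 *
                 *      int fd = open("file", O_RDWR);
                 *      char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 *                     MAP_SHARED, fd, 0);
                 *      memset(p, 0xab, len);    // dirties NOCOW file pages
                 *      msync(p, len, MS_SYNC);  // writeback ends up here
                 */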
2817                 btrfs_qgroup_free_data(inode, ordered_extent->file_offset,
2818                                        ordered_extent->len);
2819                 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2820                 if (nolock)
2821                         trans = btrfs_join_transaction_nolock(root);
2822                 else
2823                         trans = btrfs_join_transaction(root);
2824                 if (IS_ERR(trans)) {
2825                         ret = PTR_ERR(trans);
2826                         trans = NULL;
2827                         goto out;
2828                 }
2829                 trans->block_rsv = &fs_info->delalloc_block_rsv;
2830                 ret = btrfs_update_inode_fallback(trans, root, inode);
2831                 if (ret) /* -ENOMEM or corruption */
2832                         btrfs_abort_transaction(trans, ret);
2833                 goto out;
2834         }
2835
2836         lock_extent_bits(io_tree, ordered_extent->file_offset,
2837                          ordered_extent->file_offset + ordered_extent->len - 1,
2838                          &cached_state);
2839
2840         ret = test_range_bit(io_tree, ordered_extent->file_offset,
2841                         ordered_extent->file_offset + ordered_extent->len - 1,
2842                         EXTENT_DEFRAG, 1, cached_state);
2843         if (ret) {
2844                 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
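                /*
                 * Snapshot-aware defrag is intentionally disabled: the
                 * "0 &&" below short-circuits the check, so
                 * record_old_file_extents() is never called from this path.
                 */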
2845                 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
2846                         /* the inode is shared */
2847                         new = record_old_file_extents(inode, ordered_extent);
2848
2849                 clear_extent_bit(io_tree, ordered_extent->file_offset,
2850                         ordered_extent->file_offset + ordered_extent->len - 1,
2851                         EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
2852         }
2853
2854         if (nolock)
2855                 trans = btrfs_join_transaction_nolock(root);
2856         else
2857                 trans = btrfs_join_transaction(root);
2858         if (IS_ERR(trans)) {
2859                 ret = PTR_ERR(trans);
2860                 trans = NULL;
2861                 goto out_unlock;
2862         }
2863
2864         trans->block_rsv = &fs_info->delalloc_block_rsv;
2865
2866         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
2867                 compress_type = ordered_extent->compress_type;
2868         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2869                 BUG_ON(compress_type);
2870                 ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
2871                                                 ordered_extent->file_offset,
2872                                                 ordered_extent->file_offset +
2873                                                 logical_len);
2874         } else {
2875                 BUG_ON(root == fs_info->tree_root);
2876                 ret = insert_reserved_file_extent(trans, inode,
2877                                                 ordered_extent->file_offset,
2878                                                 ordered_extent->start,
2879                                                 ordered_extent->disk_len,
2880                                                 logical_len, logical_len,
2881                                                 compress_type, 0, 0,
2882                                                 BTRFS_FILE_EXTENT_REG);
2883                 if (!ret)
2884                         btrfs_release_delalloc_bytes(fs_info,
2885                                                      ordered_extent->start,
2886                                                      ordered_extent->disk_len);
2887         }
2888         unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
2889                            ordered_extent->file_offset, ordered_extent->len,
2890                            trans->transid);
2891         if (ret < 0) {
2892                 btrfs_abort_transaction(trans, ret);
2893                 goto out_unlock;
2894         }
2895
2896         add_pending_csums(trans, inode, &ordered_extent->list);
2897
2898         btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2899         ret = btrfs_update_inode_fallback(trans, root, inode);
2900         if (ret) { /* -ENOMEM or corruption */
2901                 btrfs_abort_transaction(trans, ret);
2902                 goto out_unlock;
2903         }
2904         ret = 0;
2905 out_unlock:
2906         unlock_extent_cached(io_tree, ordered_extent->file_offset,
2907                              ordered_extent->file_offset +
2908                              ordered_extent->len - 1, &cached_state, GFP_NOFS);
2909 out:
2910         if (root != fs_info->tree_root)
2911                 btrfs_delalloc_release_metadata(BTRFS_I(inode),
2912                                 ordered_extent->len);
2913         if (trans)
2914                 btrfs_end_transaction(trans);
2915
2916         if (ret || truncated) {
2917                 u64 start, end;
2918
2919                 if (truncated)
2920                         start = ordered_extent->file_offset + logical_len;
2921                 else
2922                         start = ordered_extent->file_offset;
2923                 end = ordered_extent->file_offset + ordered_extent->len - 1;
2924                 clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);
2925
2926                 /* Drop the cache for the part of the extent we didn't write. */
2927                 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
2928
2929                 /*
2930                  * If the ordered extent had an IOERR or something else went
2931                  * wrong we need to return the space for this ordered extent
2932                  * back to the allocator.  We only free the extent in the
2933                  * truncated case if we didn't write out the extent at all.
2934                  */
2935                 if ((ret || !logical_len) &&
2936                     !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2937                     !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
2938                         btrfs_free_reserved_extent(fs_info,
2939                                                    ordered_extent->start,
2940                                                    ordered_extent->disk_len, 1);
2941         }
2942
2943
2944         /*
2945          * This needs to be done to make sure anybody waiting knows we are done
2946          * updating everything for this ordered extent.
2947          */
2948         btrfs_remove_ordered_extent(inode, ordered_extent);
2949
2950         /* for snapshot-aware defrag */
2951         if (new) {
2952                 if (ret) {
2953                         free_sa_defrag_extent(new);
2954                         atomic_dec(&fs_info->defrag_running);
2955                 } else {
2956                         relink_file_extents(new);
2957                 }
2958         }
2959
2960         /* once for us */
2961         btrfs_put_ordered_extent(ordered_extent);
2962         /* once for the tree */
2963         btrfs_put_ordered_extent(ordered_extent);
2964
2965         return ret;
2966 }
2967
2968 static void finish_ordered_fn(struct btrfs_work *work)
2969 {
2970         struct btrfs_ordered_extent *ordered_extent;
2971         ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
2972         btrfs_finish_ordered_io(ordered_extent);
2973 }
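/*
 * Note on the flow: the write end_io hook below queues this work item, so
 * btrfs_finish_ordered_io() runs from a workqueue rather than in bio
 * completion context.
 */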
2974
2975 static void btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
2976                                 struct extent_state *state, int uptodate)
2977 {
2978         struct inode *inode = page->mapping->host;
2979         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2980         struct btrfs_ordered_extent *ordered_extent = NULL;
2981         struct btrfs_workqueue *wq;
2982         btrfs_work_func_t func;
2983
2984         trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
2985
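        /*
         * PagePrivate2 marks a page as covered by a pending ordered extent;
         * clear it now that writeback for this range has completed.
         */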
2986         ClearPagePrivate2(page);
2987         if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
2988                                             end - start + 1, uptodate))
2989                 return;
2990
2991         if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
2992                 wq = fs_info->endio_freespace_worker;
2993                 func = btrfs_freespace_write_helper;
2994         } else {
2995                 wq = fs_info->endio_write_workers;
2996                 func = btrfs_endio_write_helper;
2997         }
2998
2999         btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
3000                         NULL);
3001         btrfs_queue_work(wq, &ordered_extent->work);
3002 }
3003
3004 static int __readpage_endio_check(struct inode *inode,
3005                                   struct btrfs_io_bio *io_bio,
3006                                   int icsum, struct page *page,
3007                                   int pgoff, u64 start, size_t len)
3008 {
3009         char *kaddr;
3010         u32 csum_expected;
3011         u32 csum = ~(u32)0;
3012
3013         csum_expected = *(((u32 *)io_bio->csum) + icsum);
3014
3015         kaddr = kmap_atomic(page);
3016         csum = btrfs_csum_data(kaddr + pgoff, csum,  len);
3017         btrfs_csum_final(csum, (u8 *)&csum);
3018         if (csum != csum_expected)
3019                 goto zeroit;
3020
3021         kunmap_atomic(kaddr);
3022         return 0;
3023 zeroit:
3024         btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
3025                                     io_bio->mirror_num);
3026         memset(kaddr + pgoff, 1, len);
3027         flush_dcache_page(page);
3028         kunmap_atomic(kaddr);
3029         if (csum_expected == 0)
3030                 return 0;
3031         return -EIO;
3032 }
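/*
 * For reference, a minimal sketch of the checksum convention the helper
 * above relies on (assuming btrfs' crc32c usage: an all-ones seed with a
 * final bitwise inversion in btrfs_csum_final()):
 *
 *	u32 crc = btrfs_crc32c(~(u32)0, data, len);
 *	u32 stored = ~crc;	// compare against io_bio->csum[icsum]
 */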
3033
3034 /*
3035  * when reads are done, we need to check csums to verify the data is correct.
3036  * If there's a match, we allow the bio to finish.  If not, the code in
3037  * extent_io.c will try to find good copies for us.
3038  */
3039 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
3040                                       u64 phy_offset, struct page *page,
3041                                       u64 start, u64 end, int mirror)
3042 {
3043         size_t offset = start - page_offset(page);
3044         struct inode *inode = page->mapping->host;
3045         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3046         struct btrfs_root *root = BTRFS_I(inode)->root;
3047
3048         if (PageChecked(page)) {
3049                 ClearPageChecked(page);
3050                 return 0;
3051         }
3052
3053         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3054                 return 0;
3055
3056         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3057             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3058                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
3059                 return 0;
3060         }
3061
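        /*
         * There is one u32 csum per file block, so e.g. with 4KiB blocks
         * (s_blocksize_bits == 12) a phy_offset of 8192 selects csum
         * index 2 in io_bio->csum.
         */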
3062         phy_offset >>= inode->i_sb->s_blocksize_bits;
3063         return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
3064                                       start, (size_t)(end - start + 1));
3065 }
3066
3067 void btrfs_add_delayed_iput(struct inode *inode)
3068 {
3069         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3070         struct btrfs_inode *binode = BTRFS_I(inode);
3071
3072         if (atomic_add_unless(&inode->i_count, -1, 1))
3073                 return;
3074
3075         spin_lock(&fs_info->delayed_iput_lock);
3076         if (binode->delayed_iput_count == 0) {
3077                 ASSERT(list_empty(&binode->delayed_iput));
3078                 list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
3079         } else {
3080                 binode->delayed_iput_count++;
3081         }
3082         spin_unlock(&fs_info->delayed_iput_lock);
3083 }
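/*
 * Typical use, as a sketch: a context that must not trigger inode eviction
 * directly (e.g. while it still holds a transaction open) defers the final
 * reference drop:
 *
 *	btrfs_add_delayed_iput(inode);	// instead of a plain iput(inode)
 *
 * and btrfs_run_delayed_iputs() below drains the list later from a context
 * where eviction is safe.
 */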
3084
3085 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3086 {
3087
3088         spin_lock(&fs_info->delayed_iput_lock);
3089         while (!list_empty(&fs_info->delayed_iputs)) {
3090                 struct btrfs_inode *inode;
3091
3092                 inode = list_first_entry(&fs_info->delayed_iputs,
3093                                 struct btrfs_inode, delayed_iput);
3094                 if (inode->delayed_iput_count) {
3095                         inode->delayed_iput_count--;
3096                         list_move_tail(&inode->delayed_iput,
3097                                         &fs_info->delayed_iputs);
3098                 } else {
3099                         list_del_init(&inode->delayed_iput);
3100                 }
3101                 spin_unlock(&fs_info->delayed_iput_lock);
3102                 iput(&inode->vfs_inode);
3103                 spin_lock(&fs_info->delayed_iput_lock);
3104         }
3105         spin_unlock(&fs_info->delayed_iput_lock);
3106 }
3107
3108 /*
3109  * This is called at transaction commit time. If there are no orphan
3110  * files left in the subvolume, it removes the orphan item and frees the
3111  * block_rsv structure.
3112  */
3113 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
3114                               struct btrfs_root *root)
3115 {
3116         struct btrfs_fs_info *fs_info = root->fs_info;
3117         struct btrfs_block_rsv *block_rsv;
3118         int ret;
3119
3120         if (atomic_read(&root->orphan_inodes) ||
3121             root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
3122                 return;
3123
3124         spin_lock(&root->orphan_lock);
3125         if (atomic_read(&root->orphan_inodes)) {
3126                 spin_unlock(&root->orphan_lock);
3127                 return;
3128         }
3129
3130         if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
3131                 spin_unlock(&root->orphan_lock);
3132                 return;
3133         }
3134
3135         block_rsv = root->orphan_block_rsv;
3136         root->orphan_block_rsv = NULL;
3137         spin_unlock(&root->orphan_lock);
3138
3139         if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) &&
3140             btrfs_root_refs(&root->root_item) > 0) {
3141                 ret = btrfs_del_orphan_item(trans, fs_info->tree_root,
3142                                             root->root_key.objectid);
3143                 if (ret)
3144                         btrfs_abort_transaction(trans, ret);
3145                 else
3146                         clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
3147                                   &root->state);
3148         }
3149
3150         if (block_rsv) {
3151                 WARN_ON(block_rsv->size > 0);
3152                 btrfs_free_block_rsv(fs_info, block_rsv);
3153         }
3154 }
3155
3156 /*
3157  * This creates an orphan entry for the given inode in case something goes
3158  * wrong in the middle of an unlink/truncate.
3159  *
3160  * NOTE: the caller of this function should reserve 5 units of metadata
3161  *       for it (see the caller sketch after the function).
3162  */
3163 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3164                 struct btrfs_inode *inode)
3165 {
3166         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
3167         struct btrfs_root *root = inode->root;
3168         struct btrfs_block_rsv *block_rsv = NULL;
3169         int reserve = 0;
3170         int insert = 0;
3171         int ret;
3172
3173         if (!root->orphan_block_rsv) {
3174                 block_rsv = btrfs_alloc_block_rsv(fs_info,
3175                                                   BTRFS_BLOCK_RSV_TEMP);
3176                 if (!block_rsv)
3177                         return -ENOMEM;
3178         }
3179
3180         spin_lock(&root->orphan_lock);
3181         if (!root->orphan_block_rsv) {
3182                 root->orphan_block_rsv = block_rsv;
3183         } else if (block_rsv) {
3184                 btrfs_free_block_rsv(fs_info, block_rsv);
3185                 block_rsv = NULL;
3186         }
3187
3188         if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3189                               &inode->runtime_flags)) {
3190 #if 0
3191                 /*
3192                  * For proper ENOSPC handling, we should do orphan
3193                  * cleanup when mounting.  But this would introduce a
3194                  * backward compatibility issue.
3195                  */
3196                 if (!xchg(&root->orphan_item_inserted, 1))
3197                         insert = 2;
3198                 else
3199                         insert = 1;
3200 #endif
3201                 insert = 1;
3202                 atomic_inc(&root->orphan_inodes);
3203         }
3204
3205         if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3206                               &inode->runtime_flags))
3207                 reserve = 1;
3208         spin_unlock(&root->orphan_lock);
3209
3210         /* grab metadata reservation from transaction handle */
3211         if (reserve) {
3212                 ret = btrfs_orphan_reserve_metadata(trans, inode);
3213                 ASSERT(!ret);
3214                 if (ret) {
3215                         atomic_dec(&root->orphan_inodes);
3216                         clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3217                                   &inode->runtime_flags);
3218                         if (insert)
3219                                 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3220                                           &inode->runtime_flags);
3221                         return ret;
3222                 }
3223         }
3224
3225         /* insert an orphan item to track this unlinked/truncated file */
3226         if (insert >= 1) {
3227                 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
3228                 if (ret) {
3229                         atomic_dec(&root->orphan_inodes);
3230                         if (reserve) {
3231                                 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3232                                           &inode->runtime_flags);
3233                                 btrfs_orphan_release_metadata(inode);
3234                         }
3235                         if (ret != -EEXIST) {
3236                                 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3237                                           &inode->runtime_flags);
3238                                 btrfs_abort_transaction(trans, ret);
3239                                 return ret;
3240                         }
3241                 }
3242                 ret = 0;
3243         }
3244
3245         /* insert an orphan item to record that the subvolume contains orphan files */
3246         if (insert >= 2) {
3247                 ret = btrfs_insert_orphan_item(trans, fs_info->tree_root,
3248                                                root->root_key.objectid);
3249                 if (ret && ret != -EEXIST) {
3250                         btrfs_abort_transaction(trans, ret);
3251                         return ret;
3252                 }
3253         }
3254         return 0;
3255 }
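/*
 * A caller sketch matching the NOTE above (illustrative only; the exact
 * reservation a real caller makes depends on the rest of its work):
 *
 *	trans = btrfs_start_transaction(root, 5);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = btrfs_orphan_add(trans, BTRFS_I(inode));
 *	...
 *	btrfs_end_transaction(trans);
 */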
3256
3257 /*
3258  * We have done the truncate/delete so we can go ahead and remove the orphan
3259  * item for this particular inode.
3260  */
3261 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3262                             struct btrfs_inode *inode)
3263 {
3264         struct btrfs_root *root = inode->root;
3265         int delete_item = 0;
3266         int release_rsv = 0;
3267         int ret = 0;
3268
3269         spin_lock(&root->orphan_lock);
3270         if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3271                                &inode->runtime_flags))
3272                 delete_item = 1;
3273
3274         if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3275                                &inode->runtime_flags))
3276                 release_rsv = 1;
3277         spin_unlock(&root->orphan_lock);
3278
3279         if (delete_item) {
3280                 atomic_dec(&root->orphan_inodes);
3281                 if (trans)
3282                         ret = btrfs_del_orphan_item(trans, root,
3283                                                     btrfs_ino(inode));
3284         }
3285
3286         if (release_rsv)
3287                 btrfs_orphan_release_metadata(inode);
3288
3289         return ret;
3290 }
3291
3292 /*
3293  * this cleans up any orphans that may be left on the list from the last use
3294  * of this root.
3295  */
3296 int btrfs_orphan_cleanup(struct btrfs_root *root)
3297 {
3298         struct btrfs_fs_info *fs_info = root->fs_info;
3299         struct btrfs_path *path;
3300         struct extent_buffer *leaf;
3301         struct btrfs_key key, found_key;
3302         struct btrfs_trans_handle *trans;
3303         struct inode *inode;
3304         u64 last_objectid = 0;
3305         int ret = 0, nr_unlink = 0, nr_truncate = 0;
3306
3307         if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3308                 return 0;
3309
3310         path = btrfs_alloc_path();
3311         if (!path) {
3312                 ret = -ENOMEM;
3313                 goto out;
3314         }
3315         path->reada = READA_BACK;
3316
3317         key.objectid = BTRFS_ORPHAN_OBJECTID;
3318         key.type = BTRFS_ORPHAN_ITEM_KEY;
3319         key.offset = (u64)-1;
3320
3321         while (1) {
3322                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3323                 if (ret < 0)
3324                         goto out;
3325
3326                 /*
3327                  * ret == 0 means we found what we were searching for, which
3328                  * is weird but possible; so only adjust the path if we didn't
3329                  * find the key, and check whether we have items that match
3330                  */
3331                 if (ret > 0) {
3332                         ret = 0;
3333                         if (path->slots[0] == 0)
3334                                 break;
3335                         path->slots[0]--;
3336                 }
3337
3338                 /* pull out the item */
3339                 leaf = path->nodes[0];
3340                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3341
3342                 /* make sure the item matches what we want */
3343                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3344                         break;
3345                 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3346                         break;
3347
3348                 /* release the path since we're done with it */
3349                 btrfs_release_path(path);
3350
3351                 /*
3352                  * this is basically btrfs_lookup, minus the crossing-root
3353                  * handling.  the inode number is stored in the offset of the
3354                  * orphan item, e.g. (BTRFS_ORPHAN_OBJECTID, ORPHAN_ITEM, ino).
3355                  */
3356
3357                 if (found_key.offset == last_objectid) {
3358                         btrfs_err(fs_info,
3359                                   "Error removing orphan entry, stopping orphan cleanup");
3360                         ret = -EINVAL;
3361                         goto out;
3362                 }
3363
3364                 last_objectid = found_key.offset;
3365
3366                 found_key.objectid = found_key.offset;
3367                 found_key.type = BTRFS_INODE_ITEM_KEY;
3368                 found_key.offset = 0;
3369                 inode = btrfs_iget(fs_info->sb, &found_key, root, NULL);
3370                 ret = PTR_ERR_OR_ZERO(inode);
3371                 if (ret && ret != -ENOENT)
3372                         goto out;
3373
3374                 if (ret == -ENOENT && root == fs_info->tree_root) {
3375                         struct btrfs_root *dead_root;
3376                         struct btrfs_fs_info *fs_info = root->fs_info;
3377                         int is_dead_root = 0;
3378
3379                         /*
3380                          * this is an orphan in the tree root. Currently these
3381                          * could come from 2 sources:
3382                          *  a) a snapshot deletion in progress
3383                          *  b) a free space cache inode
3384                          * We need to distinguish those two, as the snapshot
3385                          * orphan must not get deleted.
3386                          * find_dead_roots already ran before us, so if this
3387                          * is a snapshot deletion, we should find the root
3388                          * in the dead_roots list
3389                          */
3390                         spin_lock(&fs_info->trans_lock);
3391                         list_for_each_entry(dead_root, &fs_info->dead_roots,
3392                                             root_list) {
3393                                 if (dead_root->root_key.objectid ==
3394                                     found_key.objectid) {
3395                                         is_dead_root = 1;
3396                                         break;
3397                                 }
3398                         }
3399                         spin_unlock(&fs_info->trans_lock);
3400                         if (is_dead_root) {
3401                                 /* prevent this orphan from being found again */
3402                                 key.offset = found_key.objectid - 1;
3403                                 continue;
3404                         }
3405                 }
3406                 /*
3407                  * Inode is already gone but the orphan item is still there,
3408                  * kill the orphan item.
3409                  */
3410                 if (ret == -ENOENT) {
3411                         trans = btrfs_start_transaction(root, 1);
3412                         if (IS_ERR(trans)) {
3413                                 ret = PTR_ERR(trans);
3414                                 goto out;
3415                         }
3416                         btrfs_debug(fs_info, "auto deleting %Lu",
3417                                     found_key.objectid);
3418                         ret = btrfs_del_orphan_item(trans, root,
3419                                                     found_key.objectid);
3420                         btrfs_end_transaction(trans);
3421                         if (ret)
3422                                 goto out;
3423                         continue;
3424                 }
3425
3426                 /*
3427                  * add this inode to the orphan list so btrfs_orphan_del does
3428                  * the proper thing when we hit it
3429                  */
3430                 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3431                         &BTRFS_I(inode)->runtime_flags);
3432                 atomic_inc(&root->orphan_inodes);
3433
3434                 /* if we have links, this was a truncate, let's do that */
3435                 if (inode->i_nlink) {
3436                         if (WARN_ON(!S_ISREG(inode->i_mode))) {
3437                                 iput(inode);
3438                                 continue;
3439                         }
3440                         nr_truncate++;
3441
3442                         /* 1 for the orphan item deletion. */
3443                         trans = btrfs_start_transaction(root, 1);
3444                         if (IS_ERR(trans)) {
3445                                 iput(inode);
3446                                 ret = PTR_ERR(trans);
3447                                 goto out;
3448                         }
3449                         ret = btrfs_orphan_add(trans, BTRFS_I(inode));
3450                         btrfs_end_transaction(trans);
3451                         if (ret) {
3452                                 iput(inode);
3453                                 goto out;
3454                         }
3455
3456                         ret = btrfs_truncate(inode);
3457                         if (ret)
3458                                 btrfs_orphan_del(NULL, BTRFS_I(inode));
3459                 } else {
3460                         nr_unlink++;
3461                 }
3462
3463                 /* this will do delete_inode and everything for us */
3464                 iput(inode);
3465                 if (ret)
3466                         goto out;
3467         }
3468         /* release the path since we're done with it */
3469         btrfs_release_path(path);
3470
3471         root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3472
3473         if (root->orphan_block_rsv)
3474                 btrfs_block_rsv_release(fs_info, root->orphan_block_rsv,
3475                                         (u64)-1);
3476
3477         if (root->orphan_block_rsv ||
3478             test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3479                 trans = btrfs_join_transaction(root);
3480                 if (!IS_ERR(trans))
3481                         btrfs_end_transaction(trans);
3482         }
3483
3484         if (nr_unlink)
3485                 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3486         if (nr_truncate)
3487                 btrfs_debug(fs_info, "truncated %d orphans", nr_truncate);
3488
3489 out:
3490         if (ret)
3491                 btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3492         btrfs_free_path(path);
3493         return ret;
3494 }
3495
3496 /*
3497  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3498  * don't find any xattrs, we know there can't be any acls.
3499  *
3500  * slot is the slot the inode is in, objectid is the objectid of the inode
3501  */
3502 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3503                                           int slot, u64 objectid,
3504                                           int *first_xattr_slot)
3505 {
3506         u32 nritems = btrfs_header_nritems(leaf);
3507         struct btrfs_key found_key;
3508         static u64 xattr_access = 0;
3509         static u64 xattr_default = 0;
3510         int scanned = 0;
3511
3512         if (!xattr_access) {
3513                 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3514                                         strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3515                 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3516                                         strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3517         }
3518
3519         slot++;
3520         *first_xattr_slot = -1;
3521         while (slot < nritems) {
3522                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3523
3524                 /* we found a different objectid, there must not be acls */
3525                 if (found_key.objectid != objectid)
3526                         return 0;
3527
3528                 /* we found an xattr, assume we've got an acl */
3529                 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3530                         if (*first_xattr_slot == -1)
3531                                 *first_xattr_slot = slot;
3532                         if (found_key.offset == xattr_access ||
3533                             found_key.offset == xattr_default)
3534                                 return 1;
3535                 }
3536
3537                 /*
3538                  * we found a key greater than an xattr key, there can't
3539                  * be any acls later on
3540                  */
3541                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3542                         return 0;
3543
3544                 slot++;
3545                 scanned++;
3546
3547                 /*
3548                  * the item order is inode, inode backrefs, xattrs, extents,
3549                  * so if there are a ton of hard links to an inode there can
3550                  * be a lot of backrefs.  Don't waste time searching too hard;
3551                  * this is just an optimization
3552                  */
3553                 if (scanned >= 8)
3554                         break;
3555         }
3556         /* we hit the end of the leaf before we found an xattr or
3557          * something larger than an xattr.  We have to assume the inode
3558          * has acls
3559          */
3560         if (*first_xattr_slot == -1)
3561                 *first_xattr_slot = slot;
3562         return 1;
3563 }
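/*
 * Example leaf layout the scan above relies on (illustrative, with the
 * xattr name hash abbreviated), in btrfs key order for objectid 257:
 *
 *	(257 INODE_ITEM 0) (257 INODE_REF 256)
 *	(257 XATTR_ITEM <hash>) (257 EXTENT_DATA 0) ...
 */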
3564
3565 /*
3566  * read an inode from the btree into the in-memory inode
3567  */
3568 static int btrfs_read_locked_inode(struct inode *inode)
3569 {
3570         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3571         struct btrfs_path *path;
3572         struct extent_buffer *leaf;
3573         struct btrfs_inode_item *inode_item;
3574         struct btrfs_root *root = BTRFS_I(inode)->root;
3575         struct btrfs_key location;
3576         unsigned long ptr;
3577         int maybe_acls;
3578         u32 rdev;
3579         int ret;
3580         bool filled = false;
3581         int first_xattr_slot;
3582
3583         ret = btrfs_fill_inode(inode, &rdev);
3584         if (!ret)
3585                 filled = true;
3586
3587         path = btrfs_alloc_path();
3588         if (!path) {
3589                 ret = -ENOMEM;
3590                 goto make_bad;
3591         }
3592
3593         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3594
3595         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3596         if (ret) {
3597                 if (ret > 0)
3598                         ret = -ENOENT;
3599                 goto make_bad;
3600         }
3601
3602         leaf = path->nodes[0];
3603
3604         if (filled)
3605                 goto cache_index;
3606
3607         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3608                                     struct btrfs_inode_item);
3609         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3610         set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3611         i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3612         i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3613         btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3614
3615         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3616         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3617
3618         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3619         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3620
3621         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3622         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3623
3624         BTRFS_I(inode)->i_otime.tv_sec =
3625                 btrfs_timespec_sec(leaf, &inode_item->otime);
3626         BTRFS_I(inode)->i_otime.tv_nsec =
3627                 btrfs_timespec_nsec(leaf, &inode_item->otime);
3628
3629         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3630         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3631         BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3632
3633         inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3634         inode->i_generation = BTRFS_I(inode)->generation;
3635         inode->i_rdev = 0;
3636         rdev = btrfs_inode_rdev(leaf, inode_item);
3637
3638         BTRFS_I(inode)->index_cnt = (u64)-1;
3639         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3640
3641 cache_index:
3642         /*
3643          * If we were modified in the current generation and evicted from memory
3644          * and then re-read we need to do a full sync since we don't have any
3645          * idea about which extents were modified before we were evicted from
3646          * cache.
3647          *
3648          * This is required for both inode re-read from disk and delayed inode
3649          * in delayed_nodes_tree.
3650          */
3651         if (BTRFS_I(inode)->last_trans == fs_info->generation)
3652                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3653                         &BTRFS_I(inode)->runtime_flags);
3654
3655         /*
3656          * We don't persist the id of the transaction where an unlink operation
3657          * against the inode was last made. So here we assume the inode might
3658          * have been evicted, and therefore the exact value of last_unlink_trans
3659          * lost, and set it to last_trans to avoid metadata inconsistencies
3660          * between the inode and its parent if the inode is fsync'ed and the log
3661          * replayed. For example, in the scenario:
3662          *
3663          * touch mydir/foo
3664          * ln mydir/foo mydir/bar
3665          * sync
3666          * unlink mydir/bar
3667          * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3668          * xfs_io -c fsync mydir/foo
3669          * <power failure>
3670          * mount fs, triggers fsync log replay
3671          *
3672          * We must make sure that when we fsync our inode foo we also log its
3673          * parent inode, otherwise after log replay the parent still has the
3674          * dentry with the "bar" name but our inode foo has a link count of 1
3675          * and doesn't have an inode ref with the name "bar" anymore.
3676          *
3677          * Setting last_unlink_trans to last_trans is a pessimistic approach,
3678          * but it guarantees correctness at the expense of occasional full
3679          * transaction commits on fsync if our inode is a directory, or if our
3680          * inode is not a directory, logging its parent unnecessarily.
3681          */
3682         BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3683
3684         path->slots[0]++;
3685         if (inode->i_nlink != 1 ||
3686             path->slots[0] >= btrfs_header_nritems(leaf))
3687                 goto cache_acl;
3688
3689         btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3690         if (location.objectid != btrfs_ino(BTRFS_I(inode)))
3691                 goto cache_acl;
3692
3693         ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3694         if (location.type == BTRFS_INODE_REF_KEY) {
3695                 struct btrfs_inode_ref *ref;
3696
3697                 ref = (struct btrfs_inode_ref *)ptr;
3698                 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3699         } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3700                 struct btrfs_inode_extref *extref;
3701
3702                 extref = (struct btrfs_inode_extref *)ptr;
3703                 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3704                                                                      extref);
3705         }
3706 cache_acl:
3707         /*
3708          * try to precache a NULL acl entry for files that don't have
3709          * any xattrs or acls
3710          */
3711         maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3712                         btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
3713         if (first_xattr_slot != -1) {
3714                 path->slots[0] = first_xattr_slot;
3715                 ret = btrfs_load_inode_props(inode, path);
3716                 if (ret)
3717                         btrfs_err(fs_info,
3718                                   "error loading props for ino %llu (root %llu): %d",
3719                                   btrfs_ino(BTRFS_I(inode)),
3720                                   root->root_key.objectid, ret);
3721         }
3722         btrfs_free_path(path);
3723
3724         if (!maybe_acls)
3725                 cache_no_acl(inode);
3726
3727         switch (inode->i_mode & S_IFMT) {
3728         case S_IFREG:
3729                 inode->i_mapping->a_ops = &btrfs_aops;
3730                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3731                 inode->i_fop = &btrfs_file_operations;
3732                 inode->i_op = &btrfs_file_inode_operations;
3733                 break;
3734         case S_IFDIR:
3735                 inode->i_fop = &btrfs_dir_file_operations;
3736                 inode->i_op = &btrfs_dir_inode_operations;
3737                 break;
3738         case S_IFLNK:
3739                 inode->i_op = &btrfs_symlink_inode_operations;
3740                 inode_nohighmem(inode);
3741                 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3742                 break;
3743         default:
3744                 inode->i_op = &btrfs_special_inode_operations;
3745                 init_special_inode(inode, inode->i_mode, rdev);
3746                 break;
3747         }
3748
3749         btrfs_update_iflags(inode);
3750         return 0;
3751
3752 make_bad:
3753         btrfs_free_path(path);
3754         make_bad_inode(inode);
3755         return ret;
3756 }
3757
3758 /*
3759  * given a leaf and an inode, copy the inode fields into the leaf
3760  */
3761 static void fill_inode_item(struct btrfs_trans_handle *trans,
3762                             struct extent_buffer *leaf,
3763                             struct btrfs_inode_item *item,
3764                             struct inode *inode)
3765 {
3766         struct btrfs_map_token token;
3767
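        /*
         * The map token caches the current extent buffer page mapping so
         * the run of btrfs_set_token_* calls below can reuse it instead of
         * re-resolving the page for every field.
         */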
3768         btrfs_init_map_token(&token);
3769
3770         btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3771         btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3772         btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3773                                    &token);
3774         btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3775         btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3776
3777         btrfs_set_token_timespec_sec(leaf, &item->atime,
3778                                      inode->i_atime.tv_sec, &token);
3779         btrfs_set_token_timespec_nsec(leaf, &item->atime,
3780                                       inode->i_atime.tv_nsec, &token);
3781
3782         btrfs_set_token_timespec_sec(leaf, &item->mtime,
3783                                      inode->i_mtime.tv_sec, &token);
3784         btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3785                                       inode->i_mtime.tv_nsec, &token);
3786
3787         btrfs_set_token_timespec_sec(leaf, &item->ctime,
3788                                      inode->i_ctime.tv_sec, &token);
3789         btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3790                                       inode->i_ctime.tv_nsec, &token);
3791
3792         btrfs_set_token_timespec_sec(leaf, &item->otime,
3793                                      BTRFS_I(inode)->i_otime.tv_sec, &token);
3794         btrfs_set_token_timespec_nsec(leaf, &item->otime,
3795                                       BTRFS_I(inode)->i_otime.tv_nsec, &token);
3796
3797         btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3798                                      &token);
3799         btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3800                                          &token);
3801         btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3802         btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3803         btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3804         btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3805         btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3806 }
3807
3808 /*
3809  * copy everything in the in-memory inode into the btree.
3810  */
3811 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3812                                 struct btrfs_root *root, struct inode *inode)
3813 {
3814         struct btrfs_inode_item *inode_item;
3815         struct btrfs_path *path;
3816         struct extent_buffer *leaf;
3817         int ret;
3818
3819         path = btrfs_alloc_path();
3820         if (!path)
3821                 return -ENOMEM;
3822
3823         path->leave_spinning = 1;
3824         ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3825                                  1);
3826         if (ret) {
3827                 if (ret > 0)
3828                         ret = -ENOENT;
3829                 goto failed;
3830         }
3831
3832         leaf = path->nodes[0];
3833         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3834                                     struct btrfs_inode_item);
3835
3836         fill_inode_item(trans, leaf, inode_item, inode);
3837         btrfs_mark_buffer_dirty(leaf);
3838         btrfs_set_inode_last_trans(trans, inode);
3839         ret = 0;
3840 failed:
3841         btrfs_free_path(path);
3842         return ret;
3843 }
3844
3845 /*
3846  * copy everything in the in-memory inode into the btree.
3847  */
3848 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3849                                 struct btrfs_root *root, struct inode *inode)
3850 {
3851         struct btrfs_fs_info *fs_info = root->fs_info;
3852         int ret;
3853
3854         /*
3855          * If the inode is a free space inode, we can deadlock during commit
3856          * if we put it into the delayed code.
3857          *
3858          * The data relocation inode should also be directly updated
3859          * without delay.
3860          */
3861         if (!btrfs_is_free_space_inode(BTRFS_I(inode))
3862             && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
3863             && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
3864                 btrfs_update_root_times(trans, root);
3865
3866                 ret = btrfs_delayed_update_inode(trans, root, inode);
3867                 if (!ret)
3868                         btrfs_set_inode_last_trans(trans, inode);
3869                 return ret;
3870         }
3871
3872         return btrfs_update_inode_item(trans, root, inode);
3873 }
3874
3875 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3876                                          struct btrfs_root *root,
3877                                          struct inode *inode)
3878 {
3879         int ret;
3880
3881         ret = btrfs_update_inode(trans, root, inode);
3882         if (ret == -ENOSPC)
3883                 return btrfs_update_inode_item(trans, root, inode);
3884         return ret;
3885 }
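/*
 * For example, btrfs_finish_ordered_io() above uses this fallback so that
 * an -ENOSPC from the delayed-inode path degrades to a direct inode item
 * update instead of failing the ordered extent completion.
 */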
3886
3887 /*
3888  * unlink helper that gets used here in inode.c and in the tree logging
3889  * recovery code.  It removes a link in a directory with a given name, and
3890  * also drops the back refs from the inode to the directory
3891  */
3892 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3893                                 struct btrfs_root *root,
3894                                 struct btrfs_inode *dir,
3895                                 struct btrfs_inode *inode,
3896                                 const char *name, int name_len)
3897 {
3898         struct btrfs_fs_info *fs_info = root->fs_info;
3899         struct btrfs_path *path;
3900         int ret = 0;
3901         struct extent_buffer *leaf;
3902         struct btrfs_dir_item *di;
3903         struct btrfs_key key;
3904         u64 index;
3905         u64 ino = btrfs_ino(inode);
3906         u64 dir_ino = btrfs_ino(dir);
3907
3908         path = btrfs_alloc_path();
3909         if (!path) {
3910                 ret = -ENOMEM;
3911                 goto out;
3912         }
3913
3914         path->leave_spinning = 1;
3915         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3916                                     name, name_len, -1);
3917         if (IS_ERR(di)) {
3918                 ret = PTR_ERR(di);
3919                 goto err;
3920         }
3921         if (!di) {
3922                 ret = -ENOENT;
3923                 goto err;
3924         }
3925         leaf = path->nodes[0];
3926         btrfs_dir_item_key_to_cpu(leaf, di, &key);
3927         ret = btrfs_delete_one_dir_name(trans, root, path, di);
3928         if (ret)
3929                 goto err;
3930         btrfs_release_path(path);
3931
3932         /*
3933          * If we don't have the dir index cached, we have to look it up
3934          * via the inode ref; since we then hold the inode ref anyway, we
3935          * remove it directly and delayed deletion is unnecessary.
3936          *
3937          * But if we do have the dir index, there is no need to search for
3938          * the inode ref.  Since the inode ref is close to the inode item,
3939          * it is better to delay its deletion and do it when we update
3940          * the inode item.
3941          */
3942         if (inode->dir_index) {
3943                 ret = btrfs_delayed_delete_inode_ref(inode);
3944                 if (!ret) {
3945                         index = inode->dir_index;
3946                         goto skip_backref;
3947                 }
3948         }
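        /*
         * For reference: the back ref lives at key (ino, BTRFS_INODE_REF_KEY,
         * dir_ino), right next to the inode item at (ino,
         * BTRFS_INODE_ITEM_KEY, 0), which is why the delayed path above can
         * drop it almost for free when the inode item is updated.  If that
         * fails, delete it by hand:
         */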
3949
3950         ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3951                                   dir_ino, &index);
3952         if (ret) {
3953                 btrfs_info(fs_info,
3954                         "failed to delete reference to %.*s, inode %llu parent %llu",
3955                         name_len, name, ino, dir_ino);
3956                 btrfs_abort_transaction(trans, ret);
3957                 goto err;
3958         }
3959 skip_backref:
3960         ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
3961         if (ret) {
3962                 btrfs_abort_transaction(trans, ret);
3963                 goto err;
3964         }
3965
3966         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
3967                         dir_ino);
3968         if (ret != 0 && ret != -ENOENT) {
3969                 btrfs_abort_transaction(trans, ret);
3970                 goto err;
3971         }
3972
3973         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
3974                         index);
3975         if (ret == -ENOENT)
3976                 ret = 0;
3977         else if (ret)
3978                 btrfs_abort_transaction(trans, ret);
3979 err:
3980         btrfs_free_path(path);
3981         if (ret)
3982                 goto out;
3983
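        /*
         * Directory i_size in btrfs is the sum of the name lengths of all
         * entries, and every entry is stored twice (a DIR_ITEM plus a
         * DIR_INDEX item), hence the name_len * 2 below.
         */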
3984         btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
3985         inode_inc_iversion(&inode->vfs_inode);
3986         inode_inc_iversion(&dir->vfs_inode);
3987         inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
3988                 dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
3989         ret = btrfs_update_inode(trans, root, &dir->vfs_inode);
3990 out:
3991         return ret;
3992 }
3993
3994 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3995                        struct btrfs_root *root,
3996                        struct btrfs_inode *dir, struct btrfs_inode *inode,
3997                        const char *name, int name_len)
3998 {
3999         int ret;
4000         ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
4001         if (!ret) {
4002                 drop_nlink(&inode->vfs_inode);
4003                 ret = btrfs_update_inode(trans, root, &inode->vfs_inode);
4004         }
4005         return ret;
4006 }
4007
4008 /*
4009  * helper to start transaction for unlink and rmdir.
4010  *
4011  * unlink and rmdir are special in btrfs: they do not always free space, so
4012  * if we cannot make our reservations the normal way, try to see if there is
4013  * plenty of slack room in the global reserve to migrate from; otherwise we
4014  * cannot allow the unlink to occur.
4015  */
4016 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4017 {
4018         struct btrfs_root *root = BTRFS_I(dir)->root;
4019
4020         /*
4021          * 1 for the possible orphan item
4022          * 1 for the dir item
4023          * 1 for the dir index
4024          * 1 for the inode ref
4025          * 1 for the inode
4026          */
4027         return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
4028 }
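/*
 * Rough sizing sketch, assuming the usual metadata formula of this era
 * (nodesize * 2 * BTRFS_MAX_LEVEL per item): the 5 items above on a 16K
 * nodesize filesystem reserve on the order of 16K * 2 * 8 * 5 = 1.25MiB,
 * which is what may get migrated out of the global reserve.
 */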
4029
4030 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4031 {
4032         struct btrfs_root *root = BTRFS_I(dir)->root;
4033         struct btrfs_trans_handle *trans;
4034         struct inode *inode = d_inode(dentry);
4035         int ret;
4036
4037         trans = __unlink_start_trans(dir);
4038         if (IS_ERR(trans))
4039                 return PTR_ERR(trans);
4040
4041         btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4042                         0);
4043
4044         ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4045                         BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4046                         dentry->d_name.len);
4047         if (ret)
4048                 goto out;
4049
4050         if (inode->i_nlink == 0) {
4051                 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4052                 if (ret)
4053                         goto out;
4054         }
4055
4056 out:
4057         btrfs_end_transaction(trans);
4058         btrfs_btree_balance_dirty(root->fs_info);
4059         return ret;
4060 }
4061
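/*
 * Unlink a subvolume from a directory.  A subvolume dentry points at a
 * ROOT_ITEM rather than a regular inode, so instead of dropping inode back
 * refs we delete the root ref/backref pair in the root tree, then remove
 * the dir item, the dir index and the delayed index entry as usual.
 */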
4062 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4063                         struct btrfs_root *root,
4064                         struct inode *dir, u64 objectid,
4065                         const char *name, int name_len)
4066 {
4067         struct btrfs_fs_info *fs_info = root->fs_info;
4068         struct btrfs_path *path;
4069         struct extent_buffer *leaf;
4070         struct btrfs_dir_item *di;
4071         struct btrfs_key key;
4072         u64 index;
4073         int ret;
4074         u64 dir_ino = btrfs_ino(BTRFS_I(dir));
4075
4076         path = btrfs_alloc_path();
4077         if (!path)
4078                 return -ENOMEM;
4079
4080         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4081                                    name, name_len, -1);
4082         if (IS_ERR_OR_NULL(di)) {
4083                 if (!di)
4084                         ret = -ENOENT;
4085                 else
4086                         ret = PTR_ERR(di);
4087                 goto out;
4088         }
4089
4090         leaf = path->nodes[0];
4091         btrfs_dir_item_key_to_cpu(leaf, di, &key);
4092         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4093         ret = btrfs_delete_one_dir_name(trans, root, path, di);
4094         if (ret) {
4095                 btrfs_abort_transaction(trans, ret);
4096                 goto out;
4097         }
4098         btrfs_release_path(path);
4099
4100         ret = btrfs_del_root_ref(trans, fs_info, objectid,
4101                                  root->root_key.objectid, dir_ino,
4102                                  &index, name, name_len);
4103         if (ret < 0) {
4104                 if (ret != -ENOENT) {
4105                         btrfs_abort_transaction(trans, ret);
4106                         goto out;
4107                 }
4108                 di = btrfs_search_dir_index_item(root, path, dir_ino,
4109                                                  name, name_len);
4110                 if (IS_ERR_OR_NULL(di)) {
4111                         if (!di)
4112                                 ret = -ENOENT;
4113                         else
4114                                 ret = PTR_ERR(di);
4115                         btrfs_abort_transaction(trans, ret);
4116                         goto out;
4117                 }
4118
4119                 leaf = path->nodes[0];
4120                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4121                 btrfs_release_path(path);
4122                 index = key.offset;
4123         }
4124         btrfs_release_path(path);
4125
4126         ret = btrfs_delete_delayed_dir_index(trans, fs_info, BTRFS_I(dir), index);
4127         if (ret) {
4128                 btrfs_abort_transaction(trans, ret);
4129                 goto out;
4130         }
4131
4132         btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
4133         inode_inc_iversion(dir);
4134         dir->i_mtime = dir->i_ctime = current_time(dir);
4135         ret = btrfs_update_inode_fallback(trans, root, dir);
4136         if (ret)
4137                 btrfs_abort_transaction(trans, ret);
4138 out:
4139         btrfs_free_path(path);
4140         return ret;
4141 }
4142
4143 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4144 {
4145         struct inode *inode = d_inode(dentry);
4146         int err = 0;
4147         struct btrfs_root *root = BTRFS_I(dir)->root;
4148         struct btrfs_trans_handle *trans;
4149         u64 last_unlink_trans;
4150
4151         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4152                 return -ENOTEMPTY;
4153         if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID)
4154                 return -EPERM;
4155
4156         trans = __unlink_start_trans(dir);
4157         if (IS_ERR(trans))
4158                 return PTR_ERR(trans);
4159
4160         if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4161                 err = btrfs_unlink_subvol(trans, root, dir,
4162                                           BTRFS_I(inode)->location.objectid,
4163                                           dentry->d_name.name,
4164                                           dentry->d_name.len);
4165                 goto out;
4166         }
4167
4168         err = btrfs_orphan_add(trans, BTRFS_I(inode));
4169         if (err)
4170                 goto out;
4171
4172         last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4173
4174         /* now the directory is empty */
4175         err = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4176                         BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4177                         dentry->d_name.len);
4178         if (!err) {
4179                 btrfs_i_size_write(BTRFS_I(inode), 0);
4180                 /*
4181                  * Propagate the last_unlink_trans value of the deleted dir to
4182                  * its parent directory. This is to prevent an unrecoverable
4183                  * log tree in the case we do something like this:
4184                  * 1) create dir foo
4185                  * 2) create snapshot under dir foo
4186                  * 3) delete the snapshot
4187                  * 4) rmdir foo
4188                  * 5) mkdir foo
4189                  * 6) fsync foo or some file inside foo
4190                  */
4191                 if (last_unlink_trans >= trans->transid)
4192                         BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4193         }
4194 out:
4195         btrfs_end_transaction(trans);
4196         btrfs_btree_balance_dirty(root->fs_info);
4197
4198         return err;
4199 }
4200
4201 static int truncate_space_check(struct btrfs_trans_handle *trans,
4202                                 struct btrfs_root *root,
4203                                 u64 bytes_deleted)
4204 {
4205         struct btrfs_fs_info *fs_info = root->fs_info;
4206         int ret;
4207
4208         /*
4209          * This is only used to apply pressure to the enospc system, we don't
4210          * intend to use this reservation at all.
4211          */
4212         bytes_deleted = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted);
4213         bytes_deleted *= fs_info->nodesize;
4214         ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
4215                                   bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
4216         if (!ret) {
4217                 trace_btrfs_space_reservation(fs_info, "transaction",
4218                                               trans->transid,
4219                                               bytes_deleted, 1);
4220                 trans->bytes_reserved += bytes_deleted;
4221         }
4222         return ret;
4224 }
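/*
 * Order-of-magnitude example: deleting 1GiB of data on a 4K-sector
 * filesystem means roughly 256k checksum entries go away; the conversion
 * above estimates how many csum-tree leaves that touches and, multiplied
 * by the nodesize, yields the byte figure used purely as enospc back
 * pressure.
 */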
4225
4226 static int truncate_inline_extent(struct inode *inode,
4227                                   struct btrfs_path *path,
4228                                   struct btrfs_key *found_key,
4229                                   const u64 item_end,
4230                                   const u64 new_size)
4231 {
4232         struct extent_buffer *leaf = path->nodes[0];
4233         int slot = path->slots[0];
4234         struct btrfs_file_extent_item *fi;
4235         u32 size = (u32)(new_size - found_key->offset);
4236         struct btrfs_root *root = BTRFS_I(inode)->root;
4237
4238         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4239
4240         if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
4241                 loff_t offset = new_size;
4242                 loff_t page_end = ALIGN(offset, PAGE_SIZE);
4243
4244                 /*
4245                  * Zero out the remainder of the last page of our inline extent,
4246                  * instead of directly truncating our inline extent here - that
4247                  * would be much more complex (decompressing all the data, then
4248                  * compressing the truncated data, which might be bigger than
4249                  * the size of the inline extent, resize the extent, etc).
4250                  * We release the path because to get the page we might need to
4251                  * read the extent item from disk (data not in the page cache).
4252                  */
4253                 btrfs_release_path(path);
4254                 return btrfs_truncate_block(inode, offset, page_end - offset,
4255                                         0);
4256         }
4257
4258         btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4259         size = btrfs_file_extent_calc_inline_size(size);
4260         btrfs_truncate_item(root->fs_info, path, size, 1);
4261
4262         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4263                 inode_sub_bytes(inode, item_end + 1 - new_size);
4264
4265         return 0;
4266 }
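/*
 * Example: an uncompressed inline extent holding 3000 bytes at file offset
 * 0, truncated to new_size 1000, is shrunk in place: ram_bytes becomes 1000
 * and the item is cut down to the inline header plus 1000 bytes of data.
 * A compressed inline extent keeps its item intact and only has the tail
 * of its last page zeroed instead.
 */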
4267
4268 /*
4269  * this can truncate away extent items, csum items and directory items.
4270  * It starts at a high offset and removes keys until it can't find
4271  * any higher than new_size
4272  *
4273  * csum items that cross the new i_size are truncated to the new size
4274  * as well.
4275  *
4276  * min_type is the minimum key type to truncate down to.  If set to 0, this
4277  * will kill all the items on this inode, including the INODE_ITEM_KEY.
4278  */
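/*
 * Example: the search starts from the highest possible key for the inode,
 * (ino, (u8)-1, (u64)-1), and walks backwards, so a regular truncate with
 * min_type == BTRFS_EXTENT_DATA_KEY stops before touching xattrs, refs or
 * the inode item, while eviction passes min_type == 0 to remove everything.
 */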
4279 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4280                                struct btrfs_root *root,
4281                                struct inode *inode,
4282                                u64 new_size, u32 min_type)
4283 {
4284         struct btrfs_fs_info *fs_info = root->fs_info;
4285         struct btrfs_path *path;
4286         struct extent_buffer *leaf;
4287         struct btrfs_file_extent_item *fi;
4288         struct btrfs_key key;
4289         struct btrfs_key found_key;
4290         u64 extent_start = 0;
4291         u64 extent_num_bytes = 0;
4292         u64 extent_offset = 0;
4293         u64 item_end = 0;
4294         u64 last_size = new_size;
4295         u32 found_type = (u8)-1;
4296         int found_extent;
4297         int del_item;
4298         int pending_del_nr = 0;
4299         int pending_del_slot = 0;
4300         int extent_type = -1;
4301         int ret;
4302         int err = 0;
4303         u64 ino = btrfs_ino(BTRFS_I(inode));
4304         u64 bytes_deleted = 0;
4305         bool be_nice = 0;
4306         bool should_throttle = 0;
4307         bool should_end = 0;
4308
4309         BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4310
4311         /*
4312          * for non-free space inodes and ref cows, we want to back off from
4313          * time to time
4314          */
4315         if (!btrfs_is_free_space_inode(BTRFS_I(inode)) &&
4316             test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4317                 be_nice = 1;
4318
4319         path = btrfs_alloc_path();
4320         if (!path)
4321                 return -ENOMEM;
4322         path->reada = READA_BACK;
4323
4324         /*
4325          * We want to drop from the next block forward in case this new size is
4326          * not block aligned since we will be keeping the last block of the
4327          * extent just the way it is.
4328          */
4329         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4330             root == fs_info->tree_root)
4331                 btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size,
4332                                         fs_info->sectorsize),
4333                                         (u64)-1, 0);
4334
4335         /*
4336          * This function is also used to drop the items in the log tree before
4337          * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4338          * it is used to drop the logged items. So we shouldn't kill the delayed
4339          * items.
4340          */
4341         if (min_type == 0 && root == BTRFS_I(inode)->root)
4342                 btrfs_kill_delayed_inode_items(BTRFS_I(inode));
4343
4344         key.objectid = ino;
4345         key.offset = (u64)-1;
4346         key.type = (u8)-1;
4347
4348 search_again:
4349         /*
4350          * with a 16K leaf size and 128MB extents, you can actually queue
4351          * up a huge file in a single leaf.  Most of the time, when
4352          * bytes_deleted is > 0, it will be huge by the time we get here
4353          */
4354         if (be_nice && bytes_deleted > SZ_32M) {
4355                 if (btrfs_should_end_transaction(trans)) {
4356                         err = -EAGAIN;
4357                         goto error;
4358                 }
4359         }
4360
4362         path->leave_spinning = 1;
4363         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4364         if (ret < 0) {
4365                 err = ret;
4366                 goto out;
4367         }
4368
4369         if (ret > 0) {
4370                 /* there are no items in the tree for us to truncate, we're
4371                  * done
4372                  */
4373                 if (path->slots[0] == 0)
4374                         goto out;
4375                 path->slots[0]--;
4376         }
4377
4378         while (1) {
4379                 fi = NULL;
4380                 leaf = path->nodes[0];
4381                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4382                 found_type = found_key.type;
4383
4384                 if (found_key.objectid != ino)
4385                         break;
4386
4387                 if (found_type < min_type)
4388                         break;
4389
4390                 item_end = found_key.offset;
4391                 if (found_type == BTRFS_EXTENT_DATA_KEY) {
4392                         fi = btrfs_item_ptr(leaf, path->slots[0],
4393                                             struct btrfs_file_extent_item);
4394                         extent_type = btrfs_file_extent_type(leaf, fi);
4395                         if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4396                                 item_end +=
4397                                     btrfs_file_extent_num_bytes(leaf, fi);
4398                         } else {
4399                                 item_end += btrfs_file_extent_inline_len(leaf,
4400                                                          path->slots[0], fi);
4401                         }
4402                         item_end--;
4403                 }
4404                 if (found_type > min_type) {
4405                         del_item = 1;
4406                 } else {
4407                         if (item_end < new_size) {
4408                                 /*
4409                                  * With NO_HOLES mode, for the following mapping
4410                                  *
4411                                  * [0-4k][hole][8k-12k]
4412                                  *
4413                                  * truncating isize down to 6k would
4414                                  * otherwise leave isize at 8k.
4415                                  */
4416                                 if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
4417                                         last_size = new_size;
4418                                 break;
4419                         }
4420                         if (found_key.offset >= new_size)
4421                                 del_item = 1;
4422                         else
4423                                 del_item = 0;
4424                 }
4425                 found_extent = 0;
4426                 /* FIXME, shrink the extent if the ref count is only 1 */
4427                 if (found_type != BTRFS_EXTENT_DATA_KEY)
4428                         goto delete;
4429
4430                 if (del_item)
4431                         last_size = found_key.offset;
4432                 else
4433                         last_size = new_size;
4434
4435                 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4436                         u64 num_dec;
4437                         extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4438                         if (!del_item) {
4439                                 u64 orig_num_bytes =
4440                                         btrfs_file_extent_num_bytes(leaf, fi);
4441                                 extent_num_bytes = ALIGN(new_size -
4442                                                 found_key.offset,
4443                                                 fs_info->sectorsize);
4444                                 btrfs_set_file_extent_num_bytes(leaf, fi,
4445                                                          extent_num_bytes);
4446                                 num_dec = (orig_num_bytes -
4447                                            extent_num_bytes);
4448                                 if (test_bit(BTRFS_ROOT_REF_COWS,
4449                                              &root->state) &&
4450                                     extent_start != 0)
4451                                         inode_sub_bytes(inode, num_dec);
4452                                 btrfs_mark_buffer_dirty(leaf);
4453                         } else {
4454                                 extent_num_bytes =
4455                                         btrfs_file_extent_disk_num_bytes(leaf,
4456                                                                          fi);
4457                                 extent_offset = found_key.offset -
4458                                         btrfs_file_extent_offset(leaf, fi);
4459
4460                                 /* FIXME blocksize != 4096 */
4461                                 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4462                                 if (extent_start != 0) {
4463                                         found_extent = 1;
4464                                         if (test_bit(BTRFS_ROOT_REF_COWS,
4465                                                      &root->state))
4466                                                 inode_sub_bytes(inode, num_dec);
4467                                 }
4468                         }
4469                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4470                         /*
4471                          * we can't truncate inline items that have had
4472                          * special encodings
4473                          */
4474                         if (!del_item &&
4475                             btrfs_file_extent_encryption(leaf, fi) == 0 &&
4476                             btrfs_file_extent_other_encoding(leaf, fi) == 0) {
4477
4478                                 /*
4479                                  * Need to release path in order to truncate a
4480                                  * compressed extent. So delete any accumulated
4481                                  * extent items so far.
4482                                  */
4483                                 if (btrfs_file_extent_compression(leaf, fi) !=
4484                                     BTRFS_COMPRESS_NONE && pending_del_nr) {
4485                                         err = btrfs_del_items(trans, root, path,
4486                                                               pending_del_slot,
4487                                                               pending_del_nr);
4488                                         if (err) {
4489                                                 btrfs_abort_transaction(trans,
4490                                                                         err);
4491                                                 goto error;
4492                                         }
4493                                         pending_del_nr = 0;
4494                                 }
4495
4496                                 err = truncate_inline_extent(inode, path,
4497                                                              &found_key,
4498                                                              item_end,
4499                                                              new_size);
4500                                 if (err) {
4501                                         btrfs_abort_transaction(trans, err);
4502                                         goto error;
4503                                 }
4504                         } else if (test_bit(BTRFS_ROOT_REF_COWS,
4505                                             &root->state)) {
4506                                 inode_sub_bytes(inode, item_end + 1 - new_size);
4507                         }
4508                 }
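                /*
                 * Example of the trimming above: truncating to new_size
                 * 10000 with a regular extent at file offset 8192 covering
                 * 8192 bytes shrinks it in place to ALIGN(10000 - 8192,
                 * 4096) = 4096 bytes, subtracting the other 4096 from the
                 * inode's byte count on COW roots; the extent itself is
                 * not freed.
                 */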
4509 delete:
4510                 if (del_item) {
4511                         if (!pending_del_nr) {
4512                                 /* no pending yet, add ourselves */
4513                                 pending_del_slot = path->slots[0];
4514                                 pending_del_nr = 1;
4515                         } else if (pending_del_nr &&
4516                                    path->slots[0] + 1 == pending_del_slot) {
4517                                 /* hop on the pending chunk */
4518                                 pending_del_nr++;
4519                                 pending_del_slot = path->slots[0];
4520                         } else {
4521                                 BUG();
4522                         }
4523                 } else {
4524                         break;
4525                 }
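                /*
                 * Batching example: walking backwards through slots 7, 6
                 * and 5 of one leaf leaves pending_del_slot == 5 and
                 * pending_del_nr == 3, which a single btrfs_del_items()
                 * call below removes in one shot.
                 */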
4526                 should_throttle = 0;
4527
4528                 if (found_extent &&
4529                     (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4530                      root == fs_info->tree_root)) {
4531                         btrfs_set_path_blocking(path);
4532                         bytes_deleted += extent_num_bytes;
4533                         ret = btrfs_free_extent(trans, fs_info, extent_start,
4534                                                 extent_num_bytes, 0,
4535                                                 btrfs_header_owner(leaf),
4536                                                 ino, extent_offset);
4537                         BUG_ON(ret);
4538                         if (btrfs_should_throttle_delayed_refs(trans, fs_info))
4539                                 btrfs_async_run_delayed_refs(fs_info,
4540                                         trans->delayed_ref_updates * 2,
4541                                         trans->transid, 0);
4542                         if (be_nice) {
4543                                 if (truncate_space_check(trans, root,
4544                                                          extent_num_bytes)) {
4545                                         should_end = 1;
4546                                 }
4547                                 if (btrfs_should_throttle_delayed_refs(trans,
4548                                                                        fs_info))
4549                                         should_throttle = 1;
4550                         }
4551                 }
4552
4553                 if (found_type == BTRFS_INODE_ITEM_KEY)
4554                         break;
4555
4556                 if (path->slots[0] == 0 ||
4557                     path->slots[0] != pending_del_slot ||
4558                     should_throttle || should_end) {
4559                         if (pending_del_nr) {
4560                                 ret = btrfs_del_items(trans, root, path,
4561                                                 pending_del_slot,
4562                                                 pending_del_nr);
4563                                 if (ret) {
4564                                         btrfs_abort_transaction(trans, ret);
4565                                         goto error;
4566                                 }
4567                                 pending_del_nr = 0;
4568                         }
4569                         btrfs_release_path(path);
4570                         if (should_throttle) {
4571                                 unsigned long updates = trans->delayed_ref_updates;
4572                                 if (updates) {
4573                                         trans->delayed_ref_updates = 0;
4574                                         ret = btrfs_run_delayed_refs(trans,
4575                                                                    fs_info,
4576                                                                    updates * 2);
4577                                         if (ret && !err)
4578                                                 err = ret;
4579                                 }
4580                         }
4581                         /*
4582                          * if we failed to refill our space rsv, bail out
4583                          * and let the transaction restart
4584                          */
4585                         if (should_end) {
4586                                 err = -EAGAIN;
4587                                 goto error;
4588                         }
4589                         goto search_again;
4590                 } else {
4591                         path->slots[0]--;
4592                 }
4593         }
4594 out:
4595         if (pending_del_nr) {
4596                 ret = btrfs_del_items(trans, root, path, pending_del_slot,
4597                                       pending_del_nr);
4598                 if (ret)
4599                         btrfs_abort_transaction(trans, ret);
4600         }
4601 error:
4602         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4603                 btrfs_ordered_update_i_size(inode, last_size, NULL);
4604
4605         btrfs_free_path(path);
4606
4607         if (err == 0) {
4608                 /* only inline file may have last_size != new_size */
4609                 if (new_size >= fs_info->sectorsize ||
4610                     new_size > fs_info->max_inline)
4611                         ASSERT(last_size == new_size);
4612         }
4613
4614         if (be_nice && bytes_deleted > SZ_32M) {
4615                 unsigned long updates = trans->delayed_ref_updates;
4616                 if (updates) {
4617                         trans->delayed_ref_updates = 0;
4618                         ret = btrfs_run_delayed_refs(trans, fs_info,
4619                                                      updates * 2);
4620                         if (ret && !err)
4621                                 err = ret;
4622                 }
4623         }
4624         return err;
4625 }
4626
4627 /*
4628  * btrfs_truncate_block - read, zero a chunk and write a block
4629  * @inode - inode that we're zeroing
4630  * @from - the offset to start zeroing
4631  * @len - the length to zero; if 0, zero from the offset to the end of
4632  *      the block
4633  * @front - zero up to the offset instead of from the offset on
4634  *
4635  * This will find the block for the "from" offset and cow the block and zero the
4636  * part we want to zero.  This is used with truncate and hole punching.
4637  */
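/*
 * Example: with a 4K block size, btrfs_truncate_block(inode, 5000, 0, 0)
 * zeroes bytes 5000..8191 (the tail of the block starting at 4096), while
 * the same call with front == 1 zeroes bytes 4096..4999 instead.
 */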
4638 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
4639                         int front)
4640 {
4641         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4642         struct address_space *mapping = inode->i_mapping;
4643         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4644         struct btrfs_ordered_extent *ordered;
4645         struct extent_state *cached_state = NULL;
4646         char *kaddr;
4647         u32 blocksize = fs_info->sectorsize;
4648         pgoff_t index = from >> PAGE_SHIFT;
4649         unsigned offset = from & (blocksize - 1);
4650         struct page *page;
4651         gfp_t mask = btrfs_alloc_write_mask(mapping);
4652         int ret = 0;
4653         u64 block_start;
4654         u64 block_end;
4655
4656         if ((offset & (blocksize - 1)) == 0 &&
4657             (!len || ((len & (blocksize - 1)) == 0)))
4658                 goto out;
4659
4660         ret = btrfs_delalloc_reserve_space(inode,
4661                         round_down(from, blocksize), blocksize);
4662         if (ret)
4663                 goto out;
4664
4665 again:
4666         page = find_or_create_page(mapping, index, mask);
4667         if (!page) {
4668                 btrfs_delalloc_release_space(inode,
4669                                 round_down(from, blocksize),
4670                                 blocksize);
4671                 ret = -ENOMEM;
4672                 goto out;
4673         }
4674
4675         block_start = round_down(from, blocksize);
4676         block_end = block_start + blocksize - 1;
4677
4678         if (!PageUptodate(page)) {
4679                 ret = btrfs_readpage(NULL, page);
4680                 lock_page(page);
4681                 if (page->mapping != mapping) {
4682                         unlock_page(page);
4683                         put_page(page);
4684                         goto again;
4685                 }
4686                 if (!PageUptodate(page)) {
4687                         ret = -EIO;
4688                         goto out_unlock;
4689                 }
4690         }
4691         wait_on_page_writeback(page);
4692
4693         lock_extent_bits(io_tree, block_start, block_end, &cached_state);
4694         set_page_extent_mapped(page);
4695
4696         ordered = btrfs_lookup_ordered_extent(inode, block_start);
4697         if (ordered) {
4698                 unlock_extent_cached(io_tree, block_start, block_end,
4699                                      &cached_state, GFP_NOFS);
4700                 unlock_page(page);
4701                 put_page(page);
4702                 btrfs_start_ordered_extent(inode, ordered, 1);
4703                 btrfs_put_ordered_extent(ordered);
4704                 goto again;
4705         }
4706
4707         clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
4708                           EXTENT_DIRTY | EXTENT_DELALLOC |
4709                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4710                           0, 0, &cached_state, GFP_NOFS);
4711
4712         ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
4713                                         &cached_state, 0);
4714         if (ret) {
4715                 unlock_extent_cached(io_tree, block_start, block_end,
4716                                      &cached_state, GFP_NOFS);
4717                 goto out_unlock;
4718         }
4719
4720         if (offset != blocksize) {
4721                 if (!len)
4722                         len = blocksize - offset;
4723                 kaddr = kmap(page);
4724                 if (front)
4725                         memset(kaddr + (block_start - page_offset(page)),
4726                                 0, offset);
4727                 else
4728                         memset(kaddr + (block_start - page_offset(page)) + offset,
4729                                 0, len);
4730                 flush_dcache_page(page);
4731                 kunmap(page);
4732         }
4733         ClearPageChecked(page);
4734         set_page_dirty(page);
4735         unlock_extent_cached(io_tree, block_start, block_end, &cached_state,
4736                              GFP_NOFS);
4737
4738 out_unlock:
4739         if (ret)
4740                 btrfs_delalloc_release_space(inode, block_start,
4741                                              blocksize);
4742         unlock_page(page);
4743         put_page(page);
4744 out:
4745         return ret;
4746 }
4747
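/*
 * Insert a hole file extent covering [offset, offset + len).  On a NO_HOLES
 * filesystem holes are implicit, so no item is inserted; we only bump the
 * inode's last_trans bookkeeping so that a later fsync still logs the hole.
 */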
4748 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
4749                              u64 offset, u64 len)
4750 {
4751         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4752         struct btrfs_trans_handle *trans;
4753         int ret;
4754
4755         /*
4756          * Still need to make sure the inode looks like it's been updated so
4757          * that any holes get logged if we fsync.
4758          */
4759         if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
4760                 BTRFS_I(inode)->last_trans = fs_info->generation;
4761                 BTRFS_I(inode)->last_sub_trans = root->log_transid;
4762                 BTRFS_I(inode)->last_log_commit = root->last_log_commit;
4763                 return 0;
4764         }
4765
4766         /*
4767          * 1 - for the one we're dropping
4768          * 1 - for the one we're adding
4769          * 1 - for updating the inode.
4770          */
4771         trans = btrfs_start_transaction(root, 3);
4772         if (IS_ERR(trans))
4773                 return PTR_ERR(trans);
4774
4775         ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
4776         if (ret) {
4777                 btrfs_abort_transaction(trans, ret);
4778                 btrfs_end_transaction(trans);
4779                 return ret;
4780         }
4781
4782         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)),
4783                         offset, 0, 0, len, 0, len, 0, 0, 0);
4784         if (ret)
4785                 btrfs_abort_transaction(trans, ret);
4786         else
4787                 btrfs_update_inode(trans, root, inode);
4788         btrfs_end_transaction(trans);
4789         return ret;
4790 }
4791
4792 /*
4793  * This function puts in dummy file extents for the area we're creating a hole
4794  * for.  So if we are truncating this file to a larger size we need to insert
4795  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
4796  * mapping for the range between oldsize and size.
4797  */
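/*
 * Example: expanding from oldsize 6000 to size 20000 with a 4K sectorsize
 * first zeroes bytes 6000..8191 via btrfs_truncate_block(), then inserts
 * hole extents (and EXTENT_MAP_HOLE mappings) covering 8192..20479.
 */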
4798 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4799 {
4800         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4801         struct btrfs_root *root = BTRFS_I(inode)->root;
4802         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4803         struct extent_map *em = NULL;
4804         struct extent_state *cached_state = NULL;
4805         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4806         u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
4807         u64 block_end = ALIGN(size, fs_info->sectorsize);
4808         u64 last_byte;
4809         u64 cur_offset;
4810         u64 hole_size;
4811         int err = 0;
4812
4813         /*
4814          * If our size started in the middle of a block we need to zero out the
4815          * rest of the block before we expand the i_size, otherwise we could
4816          * expose stale data.
4817          */
4818         err = btrfs_truncate_block(inode, oldsize, 0, 0);
4819         if (err)
4820                 return err;
4821
4822         if (size <= hole_start)
4823                 return 0;
4824
4825         while (1) {
4826                 struct btrfs_ordered_extent *ordered;
4827
4828                 lock_extent_bits(io_tree, hole_start, block_end - 1,
4829                                  &cached_state);
4830                 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), hole_start,
4831                                                      block_end - hole_start);
4832                 if (!ordered)
4833                         break;
4834                 unlock_extent_cached(io_tree, hole_start, block_end - 1,
4835                                      &cached_state, GFP_NOFS);
4836                 btrfs_start_ordered_extent(inode, ordered, 1);
4837                 btrfs_put_ordered_extent(ordered);
4838         }
4839
4840         cur_offset = hole_start;
4841         while (1) {
4842                 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
4843                                 block_end - cur_offset, 0);
4844                 if (IS_ERR(em)) {
4845                         err = PTR_ERR(em);
4846                         em = NULL;
4847                         break;
4848                 }
4849                 last_byte = min(extent_map_end(em), block_end);
4850                 last_byte = ALIGN(last_byte, fs_info->sectorsize);
4851                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4852                         struct extent_map *hole_em;
4853                         hole_size = last_byte - cur_offset;
4854
4855                         err = maybe_insert_hole(root, inode, cur_offset,
4856                                                 hole_size);
4857                         if (err)
4858                                 break;
4859                         btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
4860                                                 cur_offset + hole_size - 1, 0);
4861                         hole_em = alloc_extent_map();
4862                         if (!hole_em) {
4863                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4864                                         &BTRFS_I(inode)->runtime_flags);
4865                                 goto next;
4866                         }
4867                         hole_em->start = cur_offset;
4868                         hole_em->len = hole_size;
4869                         hole_em->orig_start = cur_offset;
4870
4871                         hole_em->block_start = EXTENT_MAP_HOLE;
4872                         hole_em->block_len = 0;
4873                         hole_em->orig_block_len = 0;
4874                         hole_em->ram_bytes = hole_size;
4875                         hole_em->bdev = fs_info->fs_devices->latest_bdev;
4876                         hole_em->compress_type = BTRFS_COMPRESS_NONE;
4877                         hole_em->generation = fs_info->generation;
4878
4879                         while (1) {
4880                                 write_lock(&em_tree->lock);
4881                                 err = add_extent_mapping(em_tree, hole_em, 1);
4882                                 write_unlock(&em_tree->lock);
4883                                 if (err != -EEXIST)
4884                                         break;
4885                                 btrfs_drop_extent_cache(BTRFS_I(inode),
4886                                                         cur_offset,
4887                                                         cur_offset +
4888                                                         hole_size - 1, 0);
4889                         }
4890                         free_extent_map(hole_em);
4891                 }
4892 next:
4893                 free_extent_map(em);
4894                 em = NULL;
4895                 cur_offset = last_byte;
4896                 if (cur_offset >= block_end)
4897                         break;
4898         }
4899         free_extent_map(em);
4900         unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
4901                              GFP_NOFS);
4902         return err;
4903 }
4904
4905 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4906 {
4907         struct btrfs_root *root = BTRFS_I(inode)->root;
4908         struct btrfs_trans_handle *trans;
4909         loff_t oldsize = i_size_read(inode);
4910         loff_t newsize = attr->ia_size;
4911         int mask = attr->ia_valid;
4912         int ret;
4913
4914         /*
4915          * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4916          * special case where we need to update the times despite not having
4917          * these flags set.  For all other operations the VFS set these flags
4918          * explicitly if it wants a timestamp update.
4919          */
4920         if (newsize != oldsize) {
4921                 inode_inc_iversion(inode);
4922                 if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
4923                         inode->i_ctime = inode->i_mtime =
4924                                 current_time(inode);
4925         }
4926
4927         if (newsize > oldsize) {
4928                 /*
4929                  * Don't do an expanding truncate while snapshotting is ongoing.
4930                  * This is to ensure the snapshot captures a fully consistent
4931                  * state of this file - if the snapshot captures this expanding
4932                  * truncation, it must capture all writes that happened before
4933                  * this truncation.
4934                  */
4935                 btrfs_wait_for_snapshot_creation(root);
4936                 ret = btrfs_cont_expand(inode, oldsize, newsize);
4937                 if (ret) {
4938                         btrfs_end_write_no_snapshoting(root);
4939                         return ret;
4940                 }
4941
4942                 trans = btrfs_start_transaction(root, 1);
4943                 if (IS_ERR(trans)) {
4944                         btrfs_end_write_no_snapshoting(root);
4945                         return PTR_ERR(trans);
4946                 }
4947
4948                 i_size_write(inode, newsize);
4949                 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
4950                 pagecache_isize_extended(inode, oldsize, newsize);
4951                 ret = btrfs_update_inode(trans, root, inode);
4952                 btrfs_end_write_no_snapshoting(root);
4953                 btrfs_end_transaction(trans);
4954         } else {
4955
4956                 /*
4957                  * We're truncating a file that used to have good data down to
4958                  * zero. Make sure it gets into the ordered flush list so that
4959                  * any new writes get down to disk quickly.
4960                  */
4961                 if (newsize == 0)
4962                         set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
4963                                 &BTRFS_I(inode)->runtime_flags);
4964
4965                 /*
4966                  * 1 for the orphan item we're going to add
4967                  * 1 for the orphan item deletion.
4968                  */
4969                 trans = btrfs_start_transaction(root, 2);
4970                 if (IS_ERR(trans))
4971                         return PTR_ERR(trans);
4972
4973                 /*
4974                  * We need to do this in case we fail at _any_ point during the
4975                  * actual truncate.  Once we do the truncate_setsize we could
4976                  * invalidate pages which forces any outstanding ordered io to
4977                  * be instantly completed which will give us extents that need
4978                  * to be truncated.  If we fail to get an orphan inode down we
4979                  * could have left over extents that were never meant to live,
4980                  * so we need to guarantee from this point on that everything
4981                  * will be consistent.
4982                  */
4983                 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4984                 btrfs_end_transaction(trans);
4985                 if (ret)
4986                         return ret;
4987
4988                 /* we don't support swapfiles, so vmtruncate shouldn't fail */
4989                 truncate_setsize(inode, newsize);
4990
4991                 /* Disable nonlocked read DIO to avoid the endless truncate */
4992                 btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
4993                 inode_dio_wait(inode);
4994                 btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
4995
4996                 ret = btrfs_truncate(inode);
4997                 if (ret && inode->i_nlink) {
4998                         int err;
4999
5000                         /* To get a stable disk_i_size */
5001                         err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
5002                         if (err) {
5003                                 btrfs_orphan_del(NULL, BTRFS_I(inode));
5004                                 return err;
5005                         }
5006
5007                         /*
5008                          * We failed to truncate.  disk_i_size is only
5009                          * adjusted down as we remove extents, so it should
5010                          * represent the true size of the inode.  Reset the
5011                          * in-memory size and delete our orphan entry.
5012                          */
5013                         trans = btrfs_join_transaction(root);
5014                         if (IS_ERR(trans)) {
5015                                 btrfs_orphan_del(NULL, BTRFS_I(inode));
5016                                 return ret;
5017                         }
5018                         i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5019                         err = btrfs_orphan_del(trans, BTRFS_I(inode));
5020                         if (err)
5021                                 btrfs_abort_transaction(trans, err);
5022                         btrfs_end_transaction(trans);
5023                 }
5024         }
5025
5026         return ret;
5027 }
5028
5029 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
5030 {
5031         struct inode *inode = d_inode(dentry);
5032         struct btrfs_root *root = BTRFS_I(inode)->root;
5033         int err;
5034
5035         if (btrfs_root_readonly(root))
5036                 return -EROFS;
5037
5038         err = setattr_prepare(dentry, attr);
5039         if (err)
5040                 return err;
5041
5042         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5043                 err = btrfs_setsize(inode, attr);
5044                 if (err)
5045                         return err;
5046         }
5047
5048         if (attr->ia_valid) {
5049                 setattr_copy(inode, attr);
5050                 inode_inc_iversion(inode);
5051                 err = btrfs_dirty_inode(inode);
5052
5053                 if (!err && attr->ia_valid & ATTR_MODE)
5054                         err = posix_acl_chmod(inode, inode->i_mode);
5055         }
5056
5057         return err;
5058 }
5059
5060 /*
5061  * While truncating the inode pages during eviction, we get the VFS calling
5062  * btrfs_invalidatepage() against each page of the inode. This is slow because
5063  * the calls to btrfs_invalidatepage() result in a huge amount of calls to
5064  * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
5065  * extent_state structures over and over, wasting lots of time.
5066  *
5067  * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
5068  * those expensive operations on a per page basis and do only the ordered io
5069  * finishing, while we release here the extent_map and extent_state structures,
5070  * without the excessive merging and splitting.
5071  */
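/*
 * Rough example, assuming 4K pages: evicting a 1GiB fully cached file would
 * otherwise mean ~262144 btrfs_invalidatepage() calls, each taking and
 * releasing extent locks, versus the bulk passes below.
 */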
5072 static void evict_inode_truncate_pages(struct inode *inode)
5073 {
5074         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5075         struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5076         struct rb_node *node;
5077
5078         ASSERT(inode->i_state & I_FREEING);
5079         truncate_inode_pages_final(&inode->i_data);
5080
5081         write_lock(&map_tree->lock);
5082         while (!RB_EMPTY_ROOT(&map_tree->map)) {
5083                 struct extent_map *em;
5084
5085                 node = rb_first(&map_tree->map);
5086                 em = rb_entry(node, struct extent_map, rb_node);
5087                 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5088                 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5089                 remove_extent_mapping(map_tree, em);
5090                 free_extent_map(em);
5091                 if (need_resched()) {
5092                         write_unlock(&map_tree->lock);
5093                         cond_resched();
5094                         write_lock(&map_tree->lock);
5095                 }
5096         }
5097         write_unlock(&map_tree->lock);
5098
5099         /*
5100          * Keep looping until we have no more ranges in the io tree.
5101          * We can have ongoing bios started by readpages (called from readahead)
5102          * that have their endio callback (extent_io.c:end_bio_extent_readpage)
5103          * still in progress (they unlocked the pages in the bio but did not
5104          * yet unlock the ranges in the io tree). This means some
5105          * ranges can still be locked and eviction started because before
5106          * submitting those bios, which are executed by a separate task (work
5107          * queue kthread), inode references (inode->i_count) were not taken
5108          * (which would be dropped in the end io callback of each bio).
5109          * Therefore here we effectively end up waiting for those bios and
5110          * anyone else holding locked ranges without having bumped the inode's
5111          * reference count - if we don't do it, when they access the inode's
5112          * io_tree to unlock a range it may be too late, leading to a
5113          * use-after-free issue.
5114          */
5115         spin_lock(&io_tree->lock);
5116         while (!RB_EMPTY_ROOT(&io_tree->state)) {
5117                 struct extent_state *state;
5118                 struct extent_state *cached_state = NULL;
5119                 u64 start;
5120                 u64 end;
5121
5122                 node = rb_first(&io_tree->state);
5123                 state = rb_entry(node, struct extent_state, rb_node);
5124                 start = state->start;
5125                 end = state->end;
5126                 spin_unlock(&io_tree->lock);
5127
5128                 lock_extent_bits(io_tree, start, end, &cached_state);
5129
5130                 /*
5131                  * If still has DELALLOC flag, the extent didn't reach disk,
5132                  * and its reserved space won't be freed by delayed_ref.
5133                  * So we need to free its reserved space here.
5134                  * (Refer to comment in btrfs_invalidatepage, case 2)
5135                  *
5136                  * Note, end is the bytenr of last byte, so we need + 1 here.
5137                  */
5138                 if (state->state & EXTENT_DELALLOC)
5139                         btrfs_qgroup_free_data(inode, start, end - start + 1);
5140
5141                 clear_extent_bit(io_tree, start, end,
5142                                  EXTENT_LOCKED | EXTENT_DIRTY |
5143                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
5144                                  EXTENT_DEFRAG, 1, 1,
5145                                  &cached_state, GFP_NOFS);
5146
5147                 cond_resched();
5148                 spin_lock(&io_tree->lock);
5149         }
5150         spin_unlock(&io_tree->lock);
5151 }
5152
5153 void btrfs_evict_inode(struct inode *inode)
5154 {
5155         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5156         struct btrfs_trans_handle *trans;
5157         struct btrfs_root *root = BTRFS_I(inode)->root;
5158         struct btrfs_block_rsv *rsv, *global_rsv;
5159         int steal_from_global = 0;
5160         u64 min_size;
5161         int ret;
5162
5163         trace_btrfs_inode_evict(inode);
5164
5165         if (!root) {
5166                 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
5167                 return;
5168         }
5169
5170         min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
5171
5172         evict_inode_truncate_pages(inode);
5173
5174         if (inode->i_nlink &&
5175             ((btrfs_root_refs(&root->root_item) != 0 &&
5176               root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5177              btrfs_is_free_space_inode(BTRFS_I(inode))))
5178                 goto no_delete;
5179
5180         if (is_bad_inode(inode)) {
5181                 btrfs_orphan_del(NULL, BTRFS_I(inode));
5182                 goto no_delete;
5183         }
5184         /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
5185         if (!special_file(inode->i_mode))
5186                 btrfs_wait_ordered_range(inode, 0, (u64)-1);
5187
5188         btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);
5189
5190         if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
5191                 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
5192                                  &BTRFS_I(inode)->runtime_flags));
5193                 goto no_delete;
5194         }
5195
5196         if (inode->i_nlink > 0) {
5197                 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5198                        root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5199                 goto no_delete;
5200         }
5201
5202         ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5203         if (ret) {
5204                 btrfs_orphan_del(NULL, BTRFS_I(inode));
5205                 goto no_delete;
5206         }
5207
5208         rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5209         if (!rsv) {
5210                 btrfs_orphan_del(NULL, BTRFS_I(inode));
5211                 goto no_delete;
5212         }
5213         rsv->size = min_size;
5214         rsv->failfast = 1;
5215         global_rsv = &fs_info->global_block_rsv;
5216
5217         btrfs_i_size_write(BTRFS_I(inode), 0);
5218
5219         /*
5220          * This is a bit simpler than btrfs_truncate since we've already
5221          * reserved our space for the orphan item in the unlink, so we just
5222          * need to reserve some slack space in case we add bytes and need
5223          * to update the inode item while doing the truncate.
5224          */
5225         while (1) {
5226                 ret = btrfs_block_rsv_refill(root, rsv, min_size,
5227                                              BTRFS_RESERVE_FLUSH_LIMIT);
5228
5229                 /*
5230                  * Try to steal from the global reserve: since we will
5231                  * likely not use this space anyway, we want to try as
5232                  * hard as possible to make this work.
5233                  */
5234                 if (ret)
5235                         steal_from_global++;
5236                 else
5237                         steal_from_global = 0;
5238                 ret = 0;
5239
5240                 /*
5241                  * steal_from_global == 0: we reserved stuff, hooray!
5242                  * steal_from_global == 1: we didn't reserve stuff, boo!
5243                  * steal_from_global == 2: we've committed, still not a lot of
5244                  * room but maybe we'll have room in the global reserve this
5245                  * time.
5246                  * steal_from_global == 3: abandon all hope!
5247                  */
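                /*
                 * Illustrative walk through the loop (editor's sketch, not
                 * in the original source): refill fails -> steal_from_global
                 * = 1 -> migrating min_size out of the global rsv fails with
                 * -ENOSPC -> commit, retry -> refill fails again (= 2) but
                 * this time the migration succeeds -> steal_from_global is
                 * reset to 0 and the truncate proceeds using rsv.
                 */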
5248                 if (steal_from_global > 2) {
5249                         btrfs_warn(fs_info,
5250                                    "Could not get space for a delete, will truncate on mount %d",
5251                                    ret);
5252                         btrfs_orphan_del(NULL, BTRFS_I(inode));
5253                         btrfs_free_block_rsv(fs_info, rsv);
5254                         goto no_delete;
5255                 }
5256
5257                 trans = btrfs_join_transaction(root);
5258                 if (IS_ERR(trans)) {
5259                         btrfs_orphan_del(NULL, BTRFS_I(inode));
5260                         btrfs_free_block_rsv(fs_info, rsv);
5261                         goto no_delete;
5262                 }
5263
5264                 /*
5265                  * We can't just steal from the global reserve; we need to
5266                  * make sure there is room to do it, and if there isn't we
5267                  * need to commit and try again.
5268                  */
5269                 if (steal_from_global) {
5270                         if (!btrfs_check_space_for_delayed_refs(trans, fs_info))
5271                                 ret = btrfs_block_rsv_migrate(global_rsv, rsv,
5272                                                               min_size, 0);
5273                         else
5274                                 ret = -ENOSPC;
5275                 }
5276
5277                 /*
5278                  * Couldn't steal from the global reserve: we have too much
5279                  * pending stuff built up.  Commit the transaction and try
5280                  * again.
5281                  */
5282                 if (ret) {
5283                         ret = btrfs_commit_transaction(trans);
5284                         if (ret) {
5285                                 btrfs_orphan_del(NULL, BTRFS_I(inode));
5286                                 btrfs_free_block_rsv(fs_info, rsv);
5287                                 goto no_delete;
5288                         }
5289                         continue;
5290                 } else {
5291                         steal_from_global = 0;
5292                 }
5293
5294                 trans->block_rsv = rsv;
5295
5296                 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
5297                 if (ret != -ENOSPC && ret != -EAGAIN)
5298                         break;
5299
5300                 trans->block_rsv = &fs_info->trans_block_rsv;
5301                 btrfs_end_transaction(trans);
5302                 trans = NULL;
5303                 btrfs_btree_balance_dirty(fs_info);
5304         }
5305
5306         btrfs_free_block_rsv(fs_info, rsv);
5307
5308         /*
5309          * Errors here aren't a big deal; they just mean we leave orphan
5310          * items in the tree.  They will be cleaned up on the next mount.
5311          */
5312         if (ret == 0) {
5313                 trans->block_rsv = root->orphan_block_rsv;
5314                 btrfs_orphan_del(trans, BTRFS_I(inode));
5315         } else {
5316                 btrfs_orphan_del(NULL, BTRFS_I(inode));
5317         }
5318
5319         trans->block_rsv = &fs_info->trans_block_rsv;
5320         if (!(root == fs_info->tree_root ||
5321               root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
5322                 btrfs_return_ino(root, btrfs_ino(BTRFS_I(inode)));
5323
5324         btrfs_end_transaction(trans);
5325         btrfs_btree_balance_dirty(fs_info);
5326 no_delete:
5327         btrfs_remove_delayed_node(BTRFS_I(inode));
5328         clear_inode(inode);
5329 }
5330
5331 /*
5332  * This returns the key found in the dir entry in the location pointer.
5333  * If no dir entry was found, location->objectid is set to 0.
5334  */
5335 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5336                                struct btrfs_key *location)
5337 {
5338         const char *name = dentry->d_name.name;
5339         int namelen = dentry->d_name.len;
5340         struct btrfs_dir_item *di;
5341         struct btrfs_path *path;
5342         struct btrfs_root *root = BTRFS_I(dir)->root;
5343         int ret = 0;
5344
5345         path = btrfs_alloc_path();
5346         if (!path)
5347                 return -ENOMEM;
5348
5349         di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
5350                         name, namelen, 0);
5351         if (IS_ERR(di))
5352                 ret = PTR_ERR(di);
5353
5354         if (IS_ERR_OR_NULL(di))
5355                 goto out_err;
5356
5357         btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5358 out:
5359         btrfs_free_path(path);
5360         return ret;
5361 out_err:
5362         location->objectid = 0;
5363         goto out;
5364 }
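/*
 * Editor's note (illustrative, not in the original source): on success the
 * location key for a regular entry typically looks like
 *
 *	{ .objectid = <inode number>, .type = BTRFS_INODE_ITEM_KEY, .offset = 0 }
 *
 * while a subvolume entry yields a BTRFS_ROOT_ITEM_KEY instead;
 * btrfs_lookup_dentry() below dispatches on location.type to handle the
 * two cases.
 */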
5365
5366 /*
5367  * When we hit a tree root in a directory, the btrfs part of the inode
5368  * needs to be changed to reflect the root directory of the tree root.
5369  * This is kind of like crossing a mount point.
5370  */
5371 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5372                                     struct inode *dir,
5373                                     struct dentry *dentry,
5374                                     struct btrfs_key *location,
5375                                     struct btrfs_root **sub_root)
5376 {
5377         struct btrfs_path *path;
5378         struct btrfs_root *new_root;
5379         struct btrfs_root_ref *ref;
5380         struct extent_buffer *leaf;
5381         struct btrfs_key key;
5382         int ret;
5383         int err = 0;
5384
5385         path = btrfs_alloc_path();
5386         if (!path) {
5387                 err = -ENOMEM;
5388                 goto out;
5389         }
5390
5391         err = -ENOENT;
5392         key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5393         key.type = BTRFS_ROOT_REF_KEY;
5394         key.offset = location->objectid;
5395
5396         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5397         if (ret) {
5398                 if (ret < 0)
5399                         err = ret;
5400                 goto out;
5401         }
5402
5403         leaf = path->nodes[0];
5404         ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5405         if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
5406             btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5407                 goto out;
5408
5409         ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5410                                    (unsigned long)(ref + 1),
5411                                    dentry->d_name.len);
5412         if (ret)
5413                 goto out;
5414
5415         btrfs_release_path(path);
5416
5417         new_root = btrfs_read_fs_root_no_name(fs_info, location);
5418         if (IS_ERR(new_root)) {
5419                 err = PTR_ERR(new_root);
5420                 goto out;
5421         }
5422
5423         *sub_root = new_root;
5424         location->objectid = btrfs_root_dirid(&new_root->root_item);
5425         location->type = BTRFS_INODE_ITEM_KEY;
5426         location->offset = 0;
5427         err = 0;
5428 out:
5429         btrfs_free_path(path);
5430         return err;
5431 }
5432
5433 static void inode_tree_add(struct inode *inode)
5434 {
5435         struct btrfs_root *root = BTRFS_I(inode)->root;
5436         struct btrfs_inode *entry;
5437         struct rb_node **p;
5438         struct rb_node *parent;
5439         struct rb_node *new = &BTRFS_I(inode)->rb_node;
5440         u64 ino = btrfs_ino(BTRFS_I(inode));
5441
5442         if (inode_unhashed(inode))
5443                 return;
5444         parent = NULL;
5445         spin_lock(&root->inode_lock);
5446         p = &root->inode_tree.rb_node;
5447         while (*p) {
5448                 parent = *p;
5449                 entry = rb_entry(parent, struct btrfs_inode, rb_node);
5450
5451                 if (ino < btrfs_ino(BTRFS_I(&entry->vfs_inode)))
5452                         p = &parent->rb_left;
5453                 else if (ino > btrfs_ino(BTRFS_I(&entry->vfs_inode)))
5454                         p = &parent->rb_right;
5455                 else {
5456                         WARN_ON(!(entry->vfs_inode.i_state &
5457                                   (I_WILL_FREE | I_FREEING)));
5458                         rb_replace_node(parent, new, &root->inode_tree);
5459                         RB_CLEAR_NODE(parent);
5460                         spin_unlock(&root->inode_lock);
5461                         return;
5462                 }
5463         }
5464         rb_link_node(new, parent, p);
5465         rb_insert_color(new, &root->inode_tree);
5466         spin_unlock(&root->inode_lock);
5467 }
5468
5469 static void inode_tree_del(struct inode *inode)
5470 {
5471         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5472         struct btrfs_root *root = BTRFS_I(inode)->root;
5473         int empty = 0;
5474
5475         spin_lock(&root->inode_lock);
5476         if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
5477                 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
5478                 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
5479                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5480         }
5481         spin_unlock(&root->inode_lock);
5482
5483         if (empty && btrfs_root_refs(&root->root_item) == 0) {
5484                 synchronize_srcu(&fs_info->subvol_srcu);
5485                 spin_lock(&root->inode_lock);
5486                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5487                 spin_unlock(&root->inode_lock);
5488                 if (empty)
5489                         btrfs_add_dead_root(root);
5490         }
5491 }
5492
5493 void btrfs_invalidate_inodes(struct btrfs_root *root)
5494 {
5495         struct btrfs_fs_info *fs_info = root->fs_info;
5496         struct rb_node *node;
5497         struct rb_node *prev;
5498         struct btrfs_inode *entry;
5499         struct inode *inode;
5500         u64 objectid = 0;
5501
5502         if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
5503                 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
5504
5505         spin_lock(&root->inode_lock);
5506 again:
5507         node = root->inode_tree.rb_node;
5508         prev = NULL;
5509         while (node) {
5510                 prev = node;
5511                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5512
5513                 if (objectid < btrfs_ino(BTRFS_I(&entry->vfs_inode)))
5514                         node = node->rb_left;
5515                 else if (objectid > btrfs_ino(BTRFS_I(&entry->vfs_inode)))
5516                         node = node->rb_right;
5517                 else
5518                         break;
5519         }
5520         if (!node) {
5521                 while (prev) {
5522                         entry = rb_entry(prev, struct btrfs_inode, rb_node);
5523                         if (objectid <= btrfs_ino(BTRFS_I(&entry->vfs_inode))) {
5524                                 node = prev;
5525                                 break;
5526                         }
5527                         prev = rb_next(prev);
5528                 }
5529         }
5530         while (node) {
5531                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5532                 objectid = btrfs_ino(BTRFS_I(&entry->vfs_inode)) + 1;
5533                 inode = igrab(&entry->vfs_inode);
5534                 if (inode) {
5535                         spin_unlock(&root->inode_lock);
5536                         if (atomic_read(&inode->i_count) > 1)
5537                                 d_prune_aliases(inode);
5538                         /*
5539                          * btrfs_drop_inode will have it removed from
5540                          * the inode cache when its usage count
5541                          * hits zero.
5542                          */
5543                         iput(inode);
5544                         cond_resched();
5545                         spin_lock(&root->inode_lock);
5546                         goto again;
5547                 }
5548
5549                 if (cond_resched_lock(&root->inode_lock))
5550                         goto again;
5551
5552                 node = rb_next(node);
5553         }
5554         spin_unlock(&root->inode_lock);
5555 }
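/*
 * Editor's note (illustrative): the objectid cursor is what makes the walk
 * above restartable.  Because objectid is advanced to ino + 1 before the
 * lock is dropped for iput(), the rescan after the "again" label resumes at
 * the first inode with ino >= objectid instead of revisiting entries that
 * were already invalidated.
 */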
5556
5557 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5558 {
5559         struct btrfs_iget_args *args = p;
5560         inode->i_ino = args->location->objectid;
5561         memcpy(&BTRFS_I(inode)->location, args->location,
5562                sizeof(*args->location));
5563         BTRFS_I(inode)->root = args->root;
5564         return 0;
5565 }
5566
5567 static int btrfs_find_actor(struct inode *inode, void *opaque)
5568 {
5569         struct btrfs_iget_args *args = opaque;
5570         return args->location->objectid == BTRFS_I(inode)->location.objectid &&
5571                 args->root == BTRFS_I(inode)->root;
5572 }
5573
5574 static struct inode *btrfs_iget_locked(struct super_block *s,
5575                                        struct btrfs_key *location,
5576                                        struct btrfs_root *root)
5577 {
5578         struct inode *inode;
5579         struct btrfs_iget_args args;
5580         unsigned long hashval = btrfs_inode_hash(location->objectid, root);
5581
5582         args.location = location;
5583         args.root = root;
5584
5585         inode = iget5_locked(s, hashval, btrfs_find_actor,
5586                              btrfs_init_locked_inode,
5587                              (void *)&args);
5588         return inode;
5589 }
5590
5591 /* Get an inode object given its location and corresponding root.
5592  * Returns in *new whether the inode was read from disk.
5593  */
5594 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5595                          struct btrfs_root *root, int *new)
5596 {
5597         struct inode *inode;
5598
5599         inode = btrfs_iget_locked(s, location, root);
5600         if (!inode)
5601                 return ERR_PTR(-ENOMEM);
5602
5603         if (inode->i_state & I_NEW) {
5604                 int ret;
5605
5606                 ret = btrfs_read_locked_inode(inode);
5607                 if (!is_bad_inode(inode)) {
5608                         inode_tree_add(inode);
5609                         unlock_new_inode(inode);
5610                         if (new)
5611                                 *new = 1;
5612                 } else {
5613                         unlock_new_inode(inode);
5614                         iput(inode);
5615                         ASSERT(ret < 0);
5616                         inode = ERR_PTR(ret < 0 ? ret : -ESTALE);
5617                 }
5618         }
5619
5620         return inode;
5621 }
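/*
 * Usage sketch (editor's illustration; hypothetical caller):
 *
 *	struct btrfs_key loc = {
 *		.objectid = ino,
 *		.type = BTRFS_INODE_ITEM_KEY,
 *		.offset = 0,
 *	};
 *	struct inode *inode = btrfs_iget(sb, &loc, root, NULL);
 *
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *
 * Passing NULL for the last argument skips the "was it read from disk"
 * report, as the lookup paths in this file do.
 */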
5622
5623 static struct inode *new_simple_dir(struct super_block *s,
5624                                     struct btrfs_key *key,
5625                                     struct btrfs_root *root)
5626 {
5627         struct inode *inode = new_inode(s);
5628
5629         if (!inode)
5630                 return ERR_PTR(-ENOMEM);
5631
5632         BTRFS_I(inode)->root = root;
5633         memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5634         set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5635
5636         inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5637         inode->i_op = &btrfs_dir_ro_inode_operations;
5638         inode->i_opflags &= ~IOP_XATTR;
5639         inode->i_fop = &simple_dir_operations;
5640         inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5641         inode->i_mtime = current_time(inode);
5642         inode->i_atime = inode->i_mtime;
5643         inode->i_ctime = inode->i_mtime;
5644         BTRFS_I(inode)->i_otime = inode->i_mtime;
5645
5646         return inode;
5647 }
5648
5649 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5650 {
5651         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
5652         struct inode *inode;
5653         struct btrfs_root *root = BTRFS_I(dir)->root;
5654         struct btrfs_root *sub_root = root;
5655         struct btrfs_key location;
5656         int index;
5657         int ret = 0;
5658
5659         if (dentry->d_name.len > BTRFS_NAME_LEN)
5660                 return ERR_PTR(-ENAMETOOLONG);
5661
5662         ret = btrfs_inode_by_name(dir, dentry, &location);
5663         if (ret < 0)
5664                 return ERR_PTR(ret);
5665
5666         if (location.objectid == 0)
5667                 return ERR_PTR(-ENOENT);
5668
5669         if (location.type == BTRFS_INODE_ITEM_KEY) {
5670                 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5671                 return inode;
5672         }
5673
5674         BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
5675
5676         index = srcu_read_lock(&fs_info->subvol_srcu);
5677         ret = fixup_tree_root_location(fs_info, dir, dentry,
5678                                        &location, &sub_root);
5679         if (ret < 0) {
5680                 if (ret != -ENOENT)
5681                         inode = ERR_PTR(ret);
5682                 else
5683                         inode = new_simple_dir(dir->i_sb, &location, sub_root);
5684         } else {
5685                 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5686         }
5687         srcu_read_unlock(&fs_info->subvol_srcu, index);
5688
5689         if (!IS_ERR(inode) && root != sub_root) {
5690                 down_read(&fs_info->cleanup_work_sem);
5691                 if (!(inode->i_sb->s_flags & MS_RDONLY))
5692                         ret = btrfs_orphan_cleanup(sub_root);
5693                 up_read(&fs_info->cleanup_work_sem);
5694                 if (ret) {
5695                         iput(inode);
5696                         inode = ERR_PTR(ret);
5697                 }
5698         }
5699
5700         return inode;
5701 }
5702
5703 static int btrfs_dentry_delete(const struct dentry *dentry)
5704 {
5705         struct btrfs_root *root;
5706         struct inode *inode = d_inode(dentry);
5707
5708         if (!inode && !IS_ROOT(dentry))
5709                 inode = d_inode(dentry->d_parent);
5710
5711         if (inode) {
5712                 root = BTRFS_I(inode)->root;
5713                 if (btrfs_root_refs(&root->root_item) == 0)
5714                         return 1;
5715
5716                 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5717                         return 1;
5718         }
5719         return 0;
5720 }
5721
5722 static void btrfs_dentry_release(struct dentry *dentry)
5723 {
5724         kfree(dentry->d_fsdata);
5725 }
5726
5727 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5728                                    unsigned int flags)
5729 {
5730         struct inode *inode;
5731
5732         inode = btrfs_lookup_dentry(dir, dentry);
5733         if (IS_ERR(inode)) {
5734                 if (PTR_ERR(inode) == -ENOENT)
5735                         inode = NULL;
5736                 else
5737                         return ERR_CAST(inode);
5738         }
5739
5740         return d_splice_alias(inode, dentry);
5741 }
5742
5743 unsigned char btrfs_filetype_table[] = {
5744         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5745 };
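/*
 * Editor's note (illustrative): the table is indexed by the on-disk
 * BTRFS_FT_* value stored in the dir item, so, assuming the usual numbering
 * (BTRFS_FT_REG_FILE == 1, BTRFS_FT_DIR == 2, ...),
 * btrfs_filetype_table[BTRFS_FT_DIR] yields DT_DIR for the dir_emit() call
 * in btrfs_real_readdir() below.
 */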
5746
5747 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5748 {
5749         struct inode *inode = file_inode(file);
5750         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5751         struct btrfs_root *root = BTRFS_I(inode)->root;
5752         struct btrfs_item *item;
5753         struct btrfs_dir_item *di;
5754         struct btrfs_key key;
5755         struct btrfs_key found_key;
5756         struct btrfs_path *path;
5757         struct list_head ins_list;
5758         struct list_head del_list;
5759         int ret;
5760         struct extent_buffer *leaf;
5761         int slot;
5762         unsigned char d_type;
5763         int over = 0;
5764         char tmp_name[32];
5765         char *name_ptr;
5766         int name_len;
5767         bool put = false;
5768         struct btrfs_key location;
5769
5770         if (!dir_emit_dots(file, ctx))
5771                 return 0;
5772
5773         path = btrfs_alloc_path();
5774         if (!path)
5775                 return -ENOMEM;
5776
5777         path->reada = READA_FORWARD;
5778
5779         INIT_LIST_HEAD(&ins_list);
5780         INIT_LIST_HEAD(&del_list);
5781         put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
5782
5783         key.type = BTRFS_DIR_INDEX_KEY;
5784         key.offset = ctx->pos;
5785         key.objectid = btrfs_ino(BTRFS_I(inode));
5786
5787         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5788         if (ret < 0)
5789                 goto err;
5790
5791         while (1) {
5792                 leaf = path->nodes[0];
5793                 slot = path->slots[0];
5794                 if (slot >= btrfs_header_nritems(leaf)) {
5795                         ret = btrfs_next_leaf(root, path);
5796                         if (ret < 0)
5797                                 goto err;
5798                         else if (ret > 0)
5799                                 break;
5800                         continue;
5801                 }
5802
5803                 item = btrfs_item_nr(slot);
5804                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5805
5806                 if (found_key.objectid != key.objectid)
5807                         break;
5808                 if (found_key.type != BTRFS_DIR_INDEX_KEY)
5809                         break;
5810                 if (found_key.offset < ctx->pos)
5811                         goto next;
5812                 if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
5813                         goto next;
5814
5815                 ctx->pos = found_key.offset;
5816
5817                 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5818                 if (verify_dir_item(fs_info, leaf, di))
5819                         goto next;
5820
5821                 name_len = btrfs_dir_name_len(leaf, di);
5822                 if (name_len <= sizeof(tmp_name)) {
5823                         name_ptr = tmp_name;
5824                 } else {
5825                         name_ptr = kmalloc(name_len, GFP_KERNEL);
5826                         if (!name_ptr) {
5827                                 ret = -ENOMEM;
5828                                 goto err;
5829                         }
5830                 }
5831                 read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
5832                                    name_len);
5833
5834                 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
5835                 btrfs_dir_item_key_to_cpu(leaf, di, &location);
5836
5837                 over = !dir_emit(ctx, name_ptr, name_len, location.objectid,
5838                                  d_type);
5839
5840                 if (name_ptr != tmp_name)
5841                         kfree(name_ptr);
5842
5843                 if (over)
5844                         goto nopos;
5845                 ctx->pos++;
5846 next:
5847                 path->slots[0]++;
5848         }
5849
5850         ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
5851         if (ret)
5852                 goto nopos;
5853
5854         /*
5855          * Stop new entries from being returned after we return the last
5856          * entry.
5857          *
5858          * New directory entries are assigned a strictly increasing
5859          * offset.  This means that new entries created during readdir
5860          * are *guaranteed* to be seen in the future by that readdir.
5861          * This has broken buggy programs which operate on names as
5862          * they're returned by readdir.  Until we re-use freed offsets
5863          * we have this hack to stop new entries from being returned
5864          * under the assumption that they'll never reach this huge
5865          * offset.
5866          *
5867          * This is being careful not to overflow 32bit loff_t unless the
5868          * last entry requires it because doing so has broken 32bit apps
5869          * in the past.
5870          */
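        /*
         * Example (editor's illustration): a stream whose last emitted entry
         * had offset 100 ends with ctx->pos == INT_MAX, so entries created
         * later (at 101, 102, ...) are never returned to this open directory
         * stream; only a directory whose offsets already exceed INT_MAX pays
         * the 64-bit (LLONG_MAX) cost.
         */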
5871         if (ctx->pos >= INT_MAX)
5872                 ctx->pos = LLONG_MAX;
5873         else
5874                 ctx->pos = INT_MAX;
5875 nopos:
5876         ret = 0;
5877 err:
5878         if (put)
5879                 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
5880         btrfs_free_path(path);
5881         return ret;
5882 }
5883
5884 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
5885 {
5886         struct btrfs_root *root = BTRFS_I(inode)->root;
5887         struct btrfs_trans_handle *trans;
5888         int ret = 0;
5889         bool nolock = false;
5890
5891         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5892                 return 0;
5893
5894         if (btrfs_fs_closing(root->fs_info) &&
5895                         btrfs_is_free_space_inode(BTRFS_I(inode)))
5896                 nolock = true;
5897
5898         if (wbc->sync_mode == WB_SYNC_ALL) {
5899                 if (nolock)
5900                         trans = btrfs_join_transaction_nolock(root);
5901                 else
5902                         trans = btrfs_join_transaction(root);
5903                 if (IS_ERR(trans))
5904                         return PTR_ERR(trans);
5905                 ret = btrfs_commit_transaction(trans);
5906         }
5907         return ret;
5908 }
5909
5910 /*
5911  * This is somewhat expensive, updating the tree every time the
5912  * inode changes.  But it is most likely to find the inode in cache.
5913  * FIXME: needs more benchmarking; there is no reason other than
5914  * performance to keep or drop this code.
5915  */
5916 static int btrfs_dirty_inode(struct inode *inode)
5917 {
5918         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5919         struct btrfs_root *root = BTRFS_I(inode)->root;
5920         struct btrfs_trans_handle *trans;
5921         int ret;
5922
5923         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5924                 return 0;
5925
5926         trans = btrfs_join_transaction(root);
5927         if (IS_ERR(trans))
5928                 return PTR_ERR(trans);
5929
5930         ret = btrfs_update_inode(trans, root, inode);
5931         if (ret == -ENOSPC) {
5932                 /* whoops, lets try again with the full transaction */
5933                 btrfs_end_transaction(trans);
5934                 trans = btrfs_start_transaction(root, 1);
5935                 if (IS_ERR(trans))
5936                         return PTR_ERR(trans);
5937
5938                 ret = btrfs_update_inode(trans, root, inode);
5939         }
5940         btrfs_end_transaction(trans);
5941         if (BTRFS_I(inode)->delayed_node)
5942                 btrfs_balance_delayed_items(fs_info);
5943
5944         return ret;
5945 }
5946
5947 /*
5948  * This is a copy of file_update_time.  We need it so we can return an error
5949  * on ENOSPC when updating the inode for file writes and mmap writes.
5950  */
5951 static int btrfs_update_time(struct inode *inode, struct timespec *now,
5952                              int flags)
5953 {
5954         struct btrfs_root *root = BTRFS_I(inode)->root;
5955
5956         if (btrfs_root_readonly(root))
5957                 return -EROFS;
5958
5959         if (flags & S_VERSION)
5960                 inode_inc_iversion(inode);
5961         if (flags & S_CTIME)
5962                 inode->i_ctime = *now;
5963         if (flags & S_MTIME)
5964                 inode->i_mtime = *now;
5965         if (flags & S_ATIME)
5966                 inode->i_atime = *now;
5967         return btrfs_dirty_inode(inode);
5968 }
5969
5970 /*
5971  * Find the highest existing sequence number in a directory
5972  * and then set the in-memory index_cnt variable to the first
5973  * free sequence number.
5974  */
5975 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
5976 {
5977         struct btrfs_root *root = inode->root;
5978         struct btrfs_key key, found_key;
5979         struct btrfs_path *path;
5980         struct extent_buffer *leaf;
5981         int ret;
5982
5983         key.objectid = btrfs_ino(inode);
5984         key.type = BTRFS_DIR_INDEX_KEY;
5985         key.offset = (u64)-1;
5986
5987         path = btrfs_alloc_path();
5988         if (!path)
5989                 return -ENOMEM;
5990
5991         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5992         if (ret < 0)
5993                 goto out;
5994         /* FIXME: we should be able to handle this */
5995         if (ret == 0)
5996                 goto out;
5997         ret = 0;
5998
5999         /*
6000          * MAGIC NUMBER EXPLANATION:
6001          * Since we search a directory based on f_pos, we have to start at 2:
6002          * '.' and '..' have f_pos of 0 and 1 respectively, so everything
6003          * else starts at 2.
6004          */
6005         if (path->slots[0] == 0) {
6006                 inode->index_cnt = 2;
6007                 goto out;
6008         }
6009
6010         path->slots[0]--;
6011
6012         leaf = path->nodes[0];
6013         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6014
6015         if (found_key.objectid != btrfs_ino(inode) ||
6016             found_key.type != BTRFS_DIR_INDEX_KEY) {
6017                 inode->index_cnt = 2;
6018                 goto out;
6019         }
6020
6021         inode->index_cnt = found_key.offset + 1;
6022 out:
6023         btrfs_free_path(path);
6024         return ret;
6025 }
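/*
 * Worked example (editor's illustration): if the last BTRFS_DIR_INDEX_KEY
 * item for this directory has offset 17, index_cnt becomes 18 and the next
 * entry created gets dir index 18; a directory with no index items yields
 * index_cnt == 2, leaving 0 and 1 for '.' and '..'.
 */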
6026
6027 /*
6028  * Helper to find a free sequence number in a given directory.  The current
6029  * code is very simple; later versions will do smarter things in the btree.
6030  */
6031 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6032 {
6033         int ret = 0;
6034
6035         if (dir->index_cnt == (u64)-1) {
6036                 ret = btrfs_inode_delayed_dir_index_count(dir);
6037                 if (ret) {
6038                         ret = btrfs_set_inode_index_count(dir);
6039                         if (ret)
6040                                 return ret;
6041                 }
6042         }
6043
6044         *index = dir->index_cnt;
6045         dir->index_cnt++;
6046
6047         return ret;
6048 }
6049
6050 static int btrfs_insert_inode_locked(struct inode *inode)
6051 {
6052         struct btrfs_iget_args args;
6053         args.location = &BTRFS_I(inode)->location;
6054         args.root = BTRFS_I(inode)->root;
6055
6056         return insert_inode_locked4(inode,
6057                    btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6058                    btrfs_find_actor, &args);
6059 }
6060
6061 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
6062                                      struct btrfs_root *root,
6063                                      struct inode *dir,
6064                                      const char *name, int name_len,
6065                                      u64 ref_objectid, u64 objectid,
6066                                      umode_t mode, u64 *index)
6067 {
6068         struct btrfs_fs_info *fs_info = root->fs_info;
6069         struct inode *inode;
6070         struct btrfs_inode_item *inode_item;
6071         struct btrfs_key *location;
6072         struct btrfs_path *path;
6073         struct btrfs_inode_ref *ref;
6074         struct btrfs_key key[2];
6075         u32 sizes[2];
6076         int nitems = name ? 2 : 1;
6077         unsigned long ptr;
6078         int ret;
6079
6080         path = btrfs_alloc_path();
6081         if (!path)
6082                 return ERR_PTR(-ENOMEM);
6083
6084         inode = new_inode(fs_info->sb);
6085         if (!inode) {
6086                 btrfs_free_path(path);
6087                 return ERR_PTR(-ENOMEM);
6088         }
6089
6090         /*
6091          * O_TMPFILE (no name): set the link count to 0, so that from this
6092          * point on we fill in the inode item with the correct link count.
6093          */
6094         if (!name)
6095                 set_nlink(inode, 0);
6096
6097         /*
6098          * we have to initialize this early, so we can reclaim the inode
6099          * number if we fail afterwards in this function.
6100          */
6101         inode->i_ino = objectid;
6102
6103         if (dir && name) {
6104                 trace_btrfs_inode_request(dir);
6105
6106                 ret = btrfs_set_inode_index(BTRFS_I(dir), index);
6107                 if (ret) {
6108                         btrfs_free_path(path);
6109                         iput(inode);
6110                         return ERR_PTR(ret);
6111                 }
6112         } else if (dir) {
6113                 *index = 0;
6114         }
6115         /*
6116          * index_cnt is ignored for everything but a dir;
6117          * btrfs_set_inode_index_count() has an explanation for the magic
6118          * number.
6119          */
6120         BTRFS_I(inode)->index_cnt = 2;
6121         BTRFS_I(inode)->dir_index = *index;
6122         BTRFS_I(inode)->root = root;
6123         BTRFS_I(inode)->generation = trans->transid;
6124         inode->i_generation = BTRFS_I(inode)->generation;
6125
6126         /*
6127          * We could have gotten an inode number from somebody who was fsynced
6128          * and then removed in this same transaction, so let's just set full
6129          * sync since it will be a full sync anyway and this will blow away the
6130          * old info in the log.
6131          */
6132         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
6133
6134         key[0].objectid = objectid;
6135         key[0].type = BTRFS_INODE_ITEM_KEY;
6136         key[0].offset = 0;
6137
6138         sizes[0] = sizeof(struct btrfs_inode_item);
6139
6140         if (name) {
6141                 /*
6142                  * Start new inodes with an inode_ref. This is slightly more
6143                  * efficient for small numbers of hard links since they will
6144                  * be packed into one item. Extended refs will kick in if we
6145                  * add more hard links than can fit in the ref item.
6146                  */
6147                 key[1].objectid = objectid;
6148                 key[1].type = BTRFS_INODE_REF_KEY;
6149                 key[1].offset = ref_objectid;
6150
6151                 sizes[1] = name_len + sizeof(*ref);
6152         }
6153
6154         location = &BTRFS_I(inode)->location;
6155         location->objectid = objectid;
6156         location->offset = 0;
6157         location->type = BTRFS_INODE_ITEM_KEY;
6158
6159         ret = btrfs_insert_inode_locked(inode);
6160         if (ret < 0)
6161                 goto fail;
6162
6163         path->leave_spinning = 1;
6164         ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
6165         if (ret != 0)
6166                 goto fail_unlock;
6167
6168         inode_init_owner(inode, dir, mode);
6169         inode_set_bytes(inode, 0);
6170
6171         inode->i_mtime = current_time(inode);
6172         inode->i_atime = inode->i_mtime;
6173         inode->i_ctime = inode->i_mtime;
6174         BTRFS_I(inode)->i_otime = inode->i_mtime;
6175
6176         inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6177                                   struct btrfs_inode_item);
6178         memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6179                              sizeof(*inode_item));
6180         fill_inode_item(trans, path->nodes[0], inode_item, inode);
6181
6182         if (name) {
6183                 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6184                                      struct btrfs_inode_ref);
6185                 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6186                 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
6187                 ptr = (unsigned long)(ref + 1);
6188                 write_extent_buffer(path->nodes[0], name, ptr, name_len);
6189         }
6190
6191         btrfs_mark_buffer_dirty(path->nodes[0]);
6192         btrfs_free_path(path);
6193
6194         btrfs_inherit_iflags(inode, dir);
6195
6196         if (S_ISREG(mode)) {
6197                 if (btrfs_test_opt(fs_info, NODATASUM))
6198                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6199                 if (btrfs_test_opt(fs_info, NODATACOW))
6200                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6201                                 BTRFS_INODE_NODATASUM;
6202         }
6203
6204         inode_tree_add(inode);
6205
6206         trace_btrfs_inode_new(inode);
6207         btrfs_set_inode_last_trans(trans, inode);
6208
6209         btrfs_update_root_times(trans, root);
6210
6211         ret = btrfs_inode_inherit_props(trans, inode, dir);
6212         if (ret)
6213                 btrfs_err(fs_info,
6214                           "error inheriting props for ino %llu (root %llu): %d",
6215                         btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret);
6216
6217         return inode;
6218
6219 fail_unlock:
6220         unlock_new_inode(inode);
6221 fail:
6222         if (dir && name)
6223                 BTRFS_I(dir)->index_cnt--;
6224         btrfs_free_path(path);
6225         iput(inode);
6226         return ERR_PTR(ret);
6227 }
6228
6229 static inline u8 btrfs_inode_type(struct inode *inode)
6230 {
6231         return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
6232 }
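/*
 * Editor's note (illustrative): for a regular file, i_mode & S_IFMT is
 * S_IFREG, so the expression above indexes btrfs_type_by_mode with
 * S_IFREG >> S_SHIFT and returns BTRFS_FT_REG_FILE, the value stored in
 * the dir item's type field.
 */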
6233
6234 /*
6235  * Utility function to add 'inode' into 'parent_inode' with
6236  * a given name and a given sequence number.
6237  * If 'add_backref' is true, also insert a backref from the
6238  * inode to the parent directory.
6239  */
6240 int btrfs_add_link(struct btrfs_trans_handle *trans,
6241                    struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6242                    const char *name, int name_len, int add_backref, u64 index)
6243 {
6244         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
6245         int ret = 0;
6246         struct btrfs_key key;
6247         struct btrfs_root *root = parent_inode->root;
6248         u64 ino = btrfs_ino(inode);
6249         u64 parent_ino = btrfs_ino(parent_inode);
6250
6251         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6252                 memcpy(&key, &inode->root->root_key, sizeof(key));
6253         } else {
6254                 key.objectid = ino;
6255                 key.type = BTRFS_INODE_ITEM_KEY;
6256                 key.offset = 0;
6257         }
6258
6259         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6260                 ret = btrfs_add_root_ref(trans, fs_info, key.objectid,
6261                                          root->root_key.objectid, parent_ino,
6262                                          index, name, name_len);
6263         } else if (add_backref) {
6264                 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6265                                              parent_ino, index);
6266         }
6267
6268         /* Nothing to clean up yet */
6269         if (ret)
6270                 return ret;
6271
6272         ret = btrfs_insert_dir_item(trans, root, name, name_len,
6273                                     parent_inode, &key,
6274                                     btrfs_inode_type(&inode->vfs_inode), index);
6275         if (ret == -EEXIST || ret == -EOVERFLOW)
6276                 goto fail_dir_item;
6277         else if (ret) {
6278                 btrfs_abort_transaction(trans, ret);
6279                 return ret;
6280         }
6281
6282         btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6283                            name_len * 2);
6284         inode_inc_iversion(&parent_inode->vfs_inode);
6285         parent_inode->vfs_inode.i_mtime = parent_inode->vfs_inode.i_ctime =
6286                 current_time(&parent_inode->vfs_inode);
6287         ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode);
6288         if (ret)
6289                 btrfs_abort_transaction(trans, ret);
6290         return ret;
6291
6292 fail_dir_item:
6293         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6294                 u64 local_index;
6295                 int err;
6296                 err = btrfs_del_root_ref(trans, fs_info, key.objectid,
6297                                          root->root_key.objectid, parent_ino,
6298                                          &local_index, name, name_len);
6299
6300         } else if (add_backref) {
6301                 u64 local_index;
6302                 int err;
6303
6304                 err = btrfs_del_inode_ref(trans, root, name, name_len,
6305                                           ino, parent_ino, &local_index);
6306         }
6307         return ret;
6308 }
6309
6310 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
6311                             struct btrfs_inode *dir, struct dentry *dentry,
6312                             struct btrfs_inode *inode, int backref, u64 index)
6313 {
6314         int err = btrfs_add_link(trans, dir, inode,
6315                                  dentry->d_name.name, dentry->d_name.len,
6316                                  backref, index);
6317         if (err > 0)
6318                 err = -EEXIST;
6319         return err;
6320 }
6321
6322 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
6323                         umode_t mode, dev_t rdev)
6324 {
6325         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6326         struct btrfs_trans_handle *trans;
6327         struct btrfs_root *root = BTRFS_I(dir)->root;
6328         struct inode *inode = NULL;
6329         int err;
6330         int drop_inode = 0;
6331         u64 objectid;
6332         u64 index = 0;
6333
6334         /*
6335          * 2 for inode item and ref
6336          * 2 for dir items
6337          * 1 for xattr if selinux is on
6338          */
6339         trans = btrfs_start_transaction(root, 5);
6340         if (IS_ERR(trans))
6341                 return PTR_ERR(trans);
6342
6343         err = btrfs_find_free_ino(root, &objectid);
6344         if (err)
6345                 goto out_unlock;
6346
6347         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6348                         dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6349                         mode, &index);
6350         if (IS_ERR(inode)) {
6351                 err = PTR_ERR(inode);
6352                 goto out_unlock;
6353         }
6354
6355         /*
6356          * If the active LSM wants to access the inode during
6357          * d_instantiate it needs these.  Smack checks to see
6358          * if the filesystem supports xattrs by looking at the
6359          * ops vector.
6360          */
6361         inode->i_op = &btrfs_special_inode_operations;
6362         init_special_inode(inode, inode->i_mode, rdev);
6363
6364         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6365         if (err)
6366                 goto out_unlock_inode;
6367
6368         err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6369                         0, index);
6370         if (err) {
6371                 goto out_unlock_inode;
6372         } else {
6373                 btrfs_update_inode(trans, root, inode);
6374                 unlock_new_inode(inode);
6375                 d_instantiate(dentry, inode);
6376         }
6377
6378 out_unlock:
6379         btrfs_end_transaction(trans);
6380         btrfs_balance_delayed_items(fs_info);
6381         btrfs_btree_balance_dirty(fs_info);
6382         if (drop_inode) {
6383                 inode_dec_link_count(inode);
6384                 iput(inode);
6385         }
6386         return err;
6387
6388 out_unlock_inode:
6389         drop_inode = 1;
6390         unlock_new_inode(inode);
6391         goto out_unlock;
6392
6393 }
6394
6395 static int btrfs_create(struct inode *dir, struct dentry *dentry,
6396                         umode_t mode, bool excl)
6397 {
6398         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6399         struct btrfs_trans_handle *trans;
6400         struct btrfs_root *root = BTRFS_I(dir)->root;
6401         struct inode *inode = NULL;
6402         int drop_inode_on_err = 0;
6403         int err;
6404         u64 objectid;
6405         u64 index = 0;
6406
6407         /*
6408          * 2 for inode item and ref
6409          * 2 for dir items
6410          * 1 for xattr if selinux is on
6411          */
6412         trans = btrfs_start_transaction(root, 5);
6413         if (IS_ERR(trans))
6414                 return PTR_ERR(trans);
6415
6416         err = btrfs_find_free_ino(root, &objectid);
6417         if (err)
6418                 goto out_unlock;
6419
6420         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6421                         dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6422                         mode, &index);
6423         if (IS_ERR(inode)) {
6424                 err = PTR_ERR(inode);
6425                 goto out_unlock;
6426         }
6427         drop_inode_on_err = 1;
6428         /*
6429          * If the active LSM wants to access the inode during
6430          * d_instantiate it needs these.  Smack checks to see
6431          * if the filesystem supports xattrs by looking at the
6432          * ops vector.
6433          */
6434         inode->i_fop = &btrfs_file_operations;
6435         inode->i_op = &btrfs_file_inode_operations;
6436         inode->i_mapping->a_ops = &btrfs_aops;
6437
6438         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6439         if (err)
6440                 goto out_unlock_inode;
6441
6442         err = btrfs_update_inode(trans, root, inode);
6443         if (err)
6444                 goto out_unlock_inode;
6445
6446         err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6447                         0, index);
6448         if (err)
6449                 goto out_unlock_inode;
6450
6451         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6452         unlock_new_inode(inode);
6453         d_instantiate(dentry, inode);
6454
6455 out_unlock:
6456         btrfs_end_transaction(trans);
6457         if (err && drop_inode_on_err) {
6458                 inode_dec_link_count(inode);
6459                 iput(inode);
6460         }
6461         btrfs_balance_delayed_items(fs_info);
6462         btrfs_btree_balance_dirty(fs_info);
6463         return err;
6464
6465 out_unlock_inode:
6466         unlock_new_inode(inode);
6467         goto out_unlock;
6468
6469 }
6470
6471 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6472                       struct dentry *dentry)
6473 {
6474         struct btrfs_trans_handle *trans = NULL;
6475         struct btrfs_root *root = BTRFS_I(dir)->root;
6476         struct inode *inode = d_inode(old_dentry);
6477         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6478         u64 index;
6479         int err;
6480         int drop_inode = 0;
6481
6482         /* do not allow hard links across subvolumes of the same device */
6483         if (root->objectid != BTRFS_I(inode)->root->objectid)
6484                 return -EXDEV;
6485
6486         if (inode->i_nlink >= BTRFS_LINK_MAX)
6487                 return -EMLINK;
6488
6489         err = btrfs_set_inode_index(BTRFS_I(dir), &index);
6490         if (err)
6491                 goto fail;
6492
6493         /*
6494          * 2 items for inode and inode ref
6495          * 2 items for dir items
6496          * 1 item for parent inode
6497          */
6498         trans = btrfs_start_transaction(root, 5);
6499         if (IS_ERR(trans)) {
6500                 err = PTR_ERR(trans);
6501                 trans = NULL;
6502                 goto fail;
6503         }
6504
6505         /* There are several dir indexes for this inode, clear the cache. */
6506         BTRFS_I(inode)->dir_index = 0ULL;
6507         inc_nlink(inode);
6508         inode_inc_iversion(inode);
6509         inode->i_ctime = current_time(inode);
6510         ihold(inode);
6511         set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6512
6513         err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6514                         1, index);
6515
6516         if (err) {
6517                 drop_inode = 1;
6518         } else {
6519                 struct dentry *parent = dentry->d_parent;
6520                 err = btrfs_update_inode(trans, root, inode);
6521                 if (err)
6522                         goto fail;
6523                 if (inode->i_nlink == 1) {
6524                         /*
6525                          * If the new hard link count is 1, the file was
6526                          * created with open(2)'s O_TMPFILE flag.
6527                          */
6528                         err = btrfs_orphan_del(trans, BTRFS_I(inode));
6529                         if (err)
6530                                 goto fail;
6531                 }
6532                 d_instantiate(dentry, inode);
6533                 btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
6534         }
6535
6536         btrfs_balance_delayed_items(fs_info);
6537 fail:
6538         if (trans)
6539                 btrfs_end_transaction(trans);
6540         if (drop_inode) {
6541                 inode_dec_link_count(inode);
6542                 iput(inode);
6543         }
6544         btrfs_btree_balance_dirty(fs_info);
6545         return err;
6546 }
6547
6548 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6549 {
6550         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6551         struct inode *inode = NULL;
6552         struct btrfs_trans_handle *trans;
6553         struct btrfs_root *root = BTRFS_I(dir)->root;
6554         int err = 0;
6555         int drop_on_err = 0;
6556         u64 objectid = 0;
6557         u64 index = 0;
6558
6559         /*
6560          * 2 items for inode and ref
6561          * 2 items for dir items
6562          * 1 for xattr if selinux is on
6563          */
6564         trans = btrfs_start_transaction(root, 5);
6565         if (IS_ERR(trans))
6566                 return PTR_ERR(trans);
6567
6568         err = btrfs_find_free_ino(root, &objectid);
6569         if (err)
6570                 goto out_fail;
6571
6572         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6573                         dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6574                         S_IFDIR | mode, &index);
6575         if (IS_ERR(inode)) {
6576                 err = PTR_ERR(inode);
6577                 goto out_fail;
6578         }
6579
6580         drop_on_err = 1;
6581         /* these must be set before we unlock the inode */
6582         inode->i_op = &btrfs_dir_inode_operations;
6583         inode->i_fop = &btrfs_dir_file_operations;
6584
6585         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6586         if (err)
6587                 goto out_fail_inode;
6588
6589         btrfs_i_size_write(BTRFS_I(inode), 0);
6590         err = btrfs_update_inode(trans, root, inode);
6591         if (err)
6592                 goto out_fail_inode;
6593
6594         err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
6595                         dentry->d_name.name,
6596                         dentry->d_name.len, 0, index);
6597         if (err)
6598                 goto out_fail_inode;
6599
6600         d_instantiate(dentry, inode);
6601         /*
6602          * mkdir is special.  We're unlocking after we call d_instantiate
6603          * to avoid a race with nfsd calling d_instantiate.
6604          */
6605         unlock_new_inode(inode);
6606         drop_on_err = 0;
6607
6608 out_fail:
6609         btrfs_end_transaction(trans);
6610         if (drop_on_err) {
6611                 inode_dec_link_count(inode);
6612                 iput(inode);
6613         }
6614         btrfs_balance_delayed_items(fs_info);
6615         btrfs_btree_balance_dirty(fs_info);
6616         return err;
6617
6618 out_fail_inode:
6619         unlock_new_inode(inode);
6620         goto out_fail;
6621 }
6622
6623 /* Find the next extent map after a given one; the caller must hold the tree lock */
6624 static struct extent_map *next_extent_map(struct extent_map *em)
6625 {
6626         struct rb_node *next;
6627
6628         next = rb_next(&em->rb_node);
6629         if (!next)
6630                 return NULL;
6631         return container_of(next, struct extent_map, rb_node);
6632 }
6633
6634 static struct extent_map *prev_extent_map(struct extent_map *em)
6635 {
6636         struct rb_node *prev;
6637
6638         prev = rb_prev(&em->rb_node);
6639         if (!prev)
6640                 return NULL;
6641         return container_of(prev, struct extent_map, rb_node);
6642 }
6643
6644 /* Helper for btrfs_get_extent.  Given an existing extent in the tree
6645  * (the existing extent is the nearest extent to map_start)
6646  * and an extent that we want to insert, deal with overlap and insert
6647  * the best-fitting new extent into the tree.
6648  */
6649 static int merge_extent_mapping(struct extent_map_tree *em_tree,
6650                                 struct extent_map *existing,
6651                                 struct extent_map *em,
6652                                 u64 map_start)
6653 {
6654         struct extent_map *prev;
6655         struct extent_map *next;
6656         u64 start;
6657         u64 end;
6658         u64 start_diff;
6659
6660         BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
6661
6662         if (existing->start > map_start) {
6663                 next = existing;
6664                 prev = prev_extent_map(next);
6665         } else {
6666                 prev = existing;
6667                 next = next_extent_map(prev);
6668         }
6669
6670         start = prev ? extent_map_end(prev) : em->start;
6671         start = max_t(u64, start, em->start);
6672         end = next ? next->start : extent_map_end(em);
6673         end = min_t(u64, end, extent_map_end(em));
6674         start_diff = start - em->start;
6675         em->start = start;
6676         em->len = end - start;
6677         if (em->block_start < EXTENT_MAP_LAST_BYTE &&
6678             !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
6679                 em->block_start += start_diff;
6680                 em->block_len -= start_diff;
6681         }
6682         return add_extent_mapping(em_tree, em, 0);
6683 }
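
/*
 * Worked example for merge_extent_mapping() (numbers are hypothetical):
 * suppose em spans [0, 64K), map_start is 16K, and the nearest existing
 * extent spans [0, 16K).  Then prev = existing and next is whatever
 * follows, say [48K, 64K).  The clamping above yields start = 16K and
 * end = 48K, so start_diff = 16K and em is trimmed to [16K, 48K), with
 * block_start advanced by 16K (skipped for compressed extents, whose
 * block range always covers the whole on-disk extent).
 */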
6684
6685 static noinline int uncompress_inline(struct btrfs_path *path,
6686                                       struct page *page,
6687                                       size_t pg_offset, u64 extent_offset,
6688                                       struct btrfs_file_extent_item *item)
6689 {
6690         int ret;
6691         struct extent_buffer *leaf = path->nodes[0];
6692         char *tmp;
6693         size_t max_size;
6694         unsigned long inline_size;
6695         unsigned long ptr;
6696         int compress_type;
6697
6698         WARN_ON(pg_offset != 0);
6699         compress_type = btrfs_file_extent_compression(leaf, item);
6700         max_size = btrfs_file_extent_ram_bytes(leaf, item);
6701         inline_size = btrfs_file_extent_inline_item_len(leaf,
6702                                         btrfs_item_nr(path->slots[0]));
6703         tmp = kmalloc(inline_size, GFP_NOFS);
6704         if (!tmp)
6705                 return -ENOMEM;
6706         ptr = btrfs_file_extent_inline_start(item);
6707
6708         read_extent_buffer(leaf, tmp, ptr, inline_size);
6709
6710         max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6711         ret = btrfs_decompress(compress_type, tmp, page,
6712                                extent_offset, inline_size, max_size);
6713         kfree(tmp);
6714         return ret;
6715 }
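
/*
 * Example of the sizes involved above (values hypothetical): inline_size
 * is the length of the (possibly compressed) payload stored directly in
 * the leaf, while max_size (ram_bytes) is the decompressed size.  Since
 * we only ever decompress into the single page passed in (pg_offset must
 * be 0), max_size is clamped to PAGE_SIZE before calling
 * btrfs_decompress().
 */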
6716
6717 /*
6718  * a bit scary, this does extent mapping from logical file offset to the disk.
6719  * the ugly parts come from merging extents from the disk with the in-ram
6720  * representation.  This gets more complex because of the data=ordered code,
6721  * where the in-ram extents might be locked pending data=ordered completion.
6722  *
6723  * This also copies inline extents directly into the page.
6724  */
6725
6726 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
6727                 struct page *page,
6728                 size_t pg_offset, u64 start, u64 len,
6729                 int create)
6730 {
6731         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
6732         int ret;
6733         int err = 0;
6734         u64 extent_start = 0;
6735         u64 extent_end = 0;
6736         u64 objectid = btrfs_ino(inode);
6737         u32 found_type;
6738         struct btrfs_path *path = NULL;
6739         struct btrfs_root *root = inode->root;
6740         struct btrfs_file_extent_item *item;
6741         struct extent_buffer *leaf;
6742         struct btrfs_key found_key;
6743         struct extent_map *em = NULL;
6744         struct extent_map_tree *em_tree = &inode->extent_tree;
6745         struct extent_io_tree *io_tree = &inode->io_tree;
6746         struct btrfs_trans_handle *trans = NULL;
6747         const bool new_inline = !page || create;
6748
6749 again:
6750         read_lock(&em_tree->lock);
6751         em = lookup_extent_mapping(em_tree, start, len);
6752         if (em)
6753                 em->bdev = fs_info->fs_devices->latest_bdev;
6754         read_unlock(&em_tree->lock);
6755
6756         if (em) {
6757                 if (em->start > start || em->start + em->len <= start)
6758                         free_extent_map(em);
6759                 else if (em->block_start == EXTENT_MAP_INLINE && page)
6760                         free_extent_map(em);
6761                 else
6762                         goto out;
6763         }
6764         em = alloc_extent_map();
6765         if (!em) {
6766                 err = -ENOMEM;
6767                 goto out;
6768         }
6769         em->bdev = fs_info->fs_devices->latest_bdev;
6770         em->start = EXTENT_MAP_HOLE;
6771         em->orig_start = EXTENT_MAP_HOLE;
6772         em->len = (u64)-1;
6773         em->block_len = (u64)-1;
6774
6775         if (!path) {
6776                 path = btrfs_alloc_path();
6777                 if (!path) {
6778                         err = -ENOMEM;
6779                         goto out;
6780                 }
6781                 /*
6782                  * Chances are we'll be called again, so go ahead and do
6783                  * readahead
6784                  */
6785                 path->reada = READA_FORWARD;
6786         }
6787
6788         ret = btrfs_lookup_file_extent(trans, root, path,
6789                                        objectid, start, trans != NULL);
6790         if (ret < 0) {
6791                 err = ret;
6792                 goto out;
6793         }
6794
6795         if (ret != 0) {
6796                 if (path->slots[0] == 0)
6797                         goto not_found;
6798                 path->slots[0]--;
6799         }
6800
6801         leaf = path->nodes[0];
6802         item = btrfs_item_ptr(leaf, path->slots[0],
6803                               struct btrfs_file_extent_item);
6804         /* are we inside the extent that was found? */
6805         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6806         found_type = found_key.type;
6807         if (found_key.objectid != objectid ||
6808             found_type != BTRFS_EXTENT_DATA_KEY) {
6809                 /*
6810                  * If we back up past the first extent we want to move
6811                  * forward and see if there is an extent in front of us,
6812                  * otherwise we'll say there is a hole for our whole search
6813                  * range, which can cause problems.
6814                  */
6815                 extent_end = start;
6816                 goto next;
6817         }
6818
6819         found_type = btrfs_file_extent_type(leaf, item);
6820         extent_start = found_key.offset;
6821         if (found_type == BTRFS_FILE_EXTENT_REG ||
6822             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6823                 extent_end = extent_start +
6824                        btrfs_file_extent_num_bytes(leaf, item);
6825         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6826                 size_t size;
6827                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6828                 extent_end = ALIGN(extent_start + size,
6829                                    fs_info->sectorsize);
6830         }
6831 next:
6832         if (start >= extent_end) {
6833                 path->slots[0]++;
6834                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6835                         ret = btrfs_next_leaf(root, path);
6836                         if (ret < 0) {
6837                                 err = ret;
6838                                 goto out;
6839                         }
6840                         if (ret > 0)
6841                                 goto not_found;
6842                         leaf = path->nodes[0];
6843                 }
6844                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6845                 if (found_key.objectid != objectid ||
6846                     found_key.type != BTRFS_EXTENT_DATA_KEY)
6847                         goto not_found;
6848                 if (start + len <= found_key.offset)
6849                         goto not_found;
6850                 if (start > found_key.offset)
6851                         goto next;
6852                 em->start = start;
6853                 em->orig_start = start;
6854                 em->len = found_key.offset - start;
6855                 goto not_found_em;
6856         }
6857
6858         btrfs_extent_item_to_extent_map(inode, path, item,
6859                         new_inline, em);
6860
6861         if (found_type == BTRFS_FILE_EXTENT_REG ||
6862             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6863                 goto insert;
6864         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6865                 unsigned long ptr;
6866                 char *map;
6867                 size_t size;
6868                 size_t extent_offset;
6869                 size_t copy_size;
6870
6871                 if (new_inline)
6872                         goto out;
6873
6874                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6875                 extent_offset = page_offset(page) + pg_offset - extent_start;
6876                 copy_size = min_t(u64, PAGE_SIZE - pg_offset,
6877                                   size - extent_offset);
6878                 em->start = extent_start + extent_offset;
6879                 em->len = ALIGN(copy_size, fs_info->sectorsize);
6880                 em->orig_block_len = em->len;
6881                 em->orig_start = em->start;
6882                 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6883                 if (create == 0 && !PageUptodate(page)) {
6884                         if (btrfs_file_extent_compression(leaf, item) !=
6885                             BTRFS_COMPRESS_NONE) {
6886                                 ret = uncompress_inline(path, page, pg_offset,
6887                                                         extent_offset, item);
6888                                 if (ret) {
6889                                         err = ret;
6890                                         goto out;
6891                                 }
6892                         } else {
6893                                 map = kmap(page);
6894                                 read_extent_buffer(leaf, map + pg_offset, ptr,
6895                                                    copy_size);
6896                                 if (pg_offset + copy_size < PAGE_SIZE) {
6897                                         memset(map + pg_offset + copy_size, 0,
6898                                                PAGE_SIZE - pg_offset -
6899                                                copy_size);
6900                                 }
6901                                 kunmap(page);
6902                         }
6903                         flush_dcache_page(page);
6904                 } else if (create && PageUptodate(page)) {
6905                         BUG();
6906                         if (!trans) {
6907                                 kunmap(page);
6908                                 free_extent_map(em);
6909                                 em = NULL;
6910
6911                                 btrfs_release_path(path);
6912                                 trans = btrfs_join_transaction(root);
6913
6914                                 if (IS_ERR(trans))
6915                                         return ERR_CAST(trans);
6916                                 goto again;
6917                         }
6918                         map = kmap(page);
6919                         write_extent_buffer(leaf, map + pg_offset, ptr,
6920                                             copy_size);
6921                         kunmap(page);
6922                         btrfs_mark_buffer_dirty(leaf);
6923                 }
6924                 set_extent_uptodate(io_tree, em->start,
6925                                     extent_map_end(em) - 1, NULL, GFP_NOFS);
6926                 goto insert;
6927         }
6928 not_found:
6929         em->start = start;
6930         em->orig_start = start;
6931         em->len = len;
6932 not_found_em:
6933         em->block_start = EXTENT_MAP_HOLE;
6934         set_bit(EXTENT_FLAG_VACANCY, &em->flags);
6935 insert:
6936         btrfs_release_path(path);
6937         if (em->start > start || extent_map_end(em) <= start) {
6938                 btrfs_err(fs_info,
6939                           "bad extent! em: [%llu %llu] passed [%llu %llu]",
6940                           em->start, em->len, start, len);
6941                 err = -EIO;
6942                 goto out;
6943         }
6944
6945         err = 0;
6946         write_lock(&em_tree->lock);
6947         ret = add_extent_mapping(em_tree, em, 0);
6948         /* it is possible that someone inserted the extent into the tree
6949          * while we had the lock dropped.  It is also possible that
6950          * an overlapping map exists in the tree
6951          */
6952         if (ret == -EEXIST) {
6953                 struct extent_map *existing;
6954
6955                 ret = 0;
6956
6957                 existing = search_extent_mapping(em_tree, start, len);
6958                 /*
6959                  * existing will always be non-NULL, since there must be an
6960                  * extent causing the -EEXIST.
6961                  */
6962                 if (existing->start == em->start &&
6963                     extent_map_end(existing) >= extent_map_end(em) &&
6964                     em->block_start == existing->block_start) {
6965                         /*
6966                          * The existing extent map already encompasses the
6967                          * entire extent map we tried to add.
6968                          */
6969                         free_extent_map(em);
6970                         em = existing;
6971                         err = 0;
6972
6973                 } else if (start >= extent_map_end(existing) ||
6974                     start <= existing->start) {
6975                         /*
6976                          * The existing extent map is the one nearest to
6977                          * the [start, start + len) range that overlaps it.
6978                          */
6979                         err = merge_extent_mapping(em_tree, existing,
6980                                                    em, start);
6981                         free_extent_map(existing);
6982                         if (err) {
6983                                 free_extent_map(em);
6984                                 em = NULL;
6985                         }
6986                 } else {
6987                         free_extent_map(em);
6988                         em = existing;
6989                         err = 0;
6990                 }
6991         }
6992         write_unlock(&em_tree->lock);
6993 out:
6994
6995         trace_btrfs_get_extent(root, inode, em);
6996
6997         btrfs_free_path(path);
6998         if (trans) {
6999                 ret = btrfs_end_transaction(trans);
7000                 if (!err)
7001                         err = ret;
7002         }
7003         if (err) {
7004                 free_extent_map(em);
7005                 return ERR_PTR(err);
7006         }
7007         BUG_ON(!em); /* Error is always set */
7008         return em;
7009 }
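
/*
 * A minimal lookup-only use of btrfs_get_extent() (a sketch, with error
 * handling trimmed; inode, start and len are caller-supplied values):
 *
 *	struct extent_map *em;
 *
 *	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	...use em->block_start, em->len...
 *	free_extent_map(em);
 *
 * With page == NULL and create == 0 no transaction is joined and no page
 * data is filled in; the call only maps [start, start + len) to an
 * extent map, returning a hole em (block_start == EXTENT_MAP_HOLE) when
 * nothing covers the range.
 */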
7010
7011 struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
7012                 struct page *page,
7013                 size_t pg_offset, u64 start, u64 len,
7014                 int create)
7015 {
7016         struct extent_map *em;
7017         struct extent_map *hole_em = NULL;
7018         u64 range_start = start;
7019         u64 end;
7020         u64 found;
7021         u64 found_end;
7022         int err = 0;
7023
7024         em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
7025         if (IS_ERR(em))
7026                 return em;
7027         if (em) {
7028                 /*
7029                  * if our em maps to
7030                  * -  a hole or
7031                  * -  a pre-alloc extent,
7032                  * there might actually be delalloc bytes behind it.
7033                  */
7034                 if (em->block_start != EXTENT_MAP_HOLE &&
7035                     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7036                         return em;
7037                 else
7038                         hole_em = em;
7039         }
7040
7041         /* check to see if we've wrapped (len == -1 or similar) */
7042         end = start + len;
7043         if (end < start)
7044                 end = (u64)-1;
7045         else
7046                 end -= 1;
7047
7048         em = NULL;
7049
7050         /* ok, we didn't find anything, let's look for delalloc */
7051         found = count_range_bits(&inode->io_tree, &range_start,
7052                                  end, len, EXTENT_DELALLOC, 1);
7053         found_end = range_start + found;
7054         if (found_end < range_start)
7055                 found_end = (u64)-1;
7056
7057         /*
7058          * we didn't find anything useful, return
7059          * the original results from get_extent()
7060          */
7061         if (range_start > end || found_end <= start) {
7062                 em = hole_em;
7063                 hole_em = NULL;
7064                 goto out;
7065         }
7066
7067         /* adjust the range_start to make sure it doesn't
7068          * go backwards from the start the caller passed in
7069          */
7070         range_start = max(start, range_start);
7071         found = found_end - range_start;
7072
7073         if (found > 0) {
7074                 u64 hole_start = start;
7075                 u64 hole_len = len;
7076
7077                 em = alloc_extent_map();
7078                 if (!em) {
7079                         err = -ENOMEM;
7080                         goto out;
7081                 }
7082                 /*
7083                  * when btrfs_get_extent can't find anything it
7084                  * returns one huge hole
7085                  *
7086                  * make sure what it found really fits our range, and
7087                  * adjust to make sure it is based on the start from
7088                  * the caller
7089                  */
7090                 if (hole_em) {
7091                         u64 calc_end = extent_map_end(hole_em);
7092
7093                         if (calc_end <= start || (hole_em->start > end)) {
7094                                 free_extent_map(hole_em);
7095                                 hole_em = NULL;
7096                         } else {
7097                                 hole_start = max(hole_em->start, start);
7098                                 hole_len = calc_end - hole_start;
7099                         }
7100                 }
7101                 em->bdev = NULL;
7102                 if (hole_em && range_start > hole_start) {
7103                         /* our hole starts before our delalloc, so we
7104                          * have to return just the parts of the hole
7105                          * that go until the delalloc starts
7106                          */
7107                         em->len = min(hole_len,
7108                                       range_start - hole_start);
7109                         em->start = hole_start;
7110                         em->orig_start = hole_start;
7111                         /*
7112                          * don't adjust block start at all,
7113                          * it is fixed at EXTENT_MAP_HOLE
7114                          */
7115                         em->block_start = hole_em->block_start;
7116                         em->block_len = hole_len;
7117                         if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
7118                                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7119                 } else {
7120                         em->start = range_start;
7121                         em->len = found;
7122                         em->orig_start = range_start;
7123                         em->block_start = EXTENT_MAP_DELALLOC;
7124                         em->block_len = found;
7125                 }
7126         } else if (hole_em) {
7127                 return hole_em;
7128         }
7129 out:
7130
7131         free_extent_map(hole_em);
7132         if (err) {
7133                 free_extent_map(em);
7134                 return ERR_PTR(err);
7135         }
7136         return em;
7137 }
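
/*
 * Example of what the fiemap variant adds (layout hypothetical): if the
 * query is [0, 128K), btrfs_get_extent() reports one big hole, but dirty
 * pages (delalloc) cover [64K, 96K), then count_range_bits() finds that
 * range and the em returned here describes only the leading hole
 * [0, 64K); a follow-up query starting at 64K then reports the delalloc
 * range itself with block_start == EXTENT_MAP_DELALLOC.
 */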
7138
7139 static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
7140                                                   const u64 start,
7141                                                   const u64 len,
7142                                                   const u64 orig_start,
7143                                                   const u64 block_start,
7144                                                   const u64 block_len,
7145                                                   const u64 orig_block_len,
7146                                                   const u64 ram_bytes,
7147                                                   const int type)
7148 {
7149         struct extent_map *em = NULL;
7150         int ret;
7151
7152         if (type != BTRFS_ORDERED_NOCOW) {
7153                 em = create_io_em(inode, start, len, orig_start,
7154                                   block_start, block_len, orig_block_len,
7155                                   ram_bytes,
7156                                   BTRFS_COMPRESS_NONE, /* compress_type */
7157                                   type);
7158                 if (IS_ERR(em))
7159                         goto out;
7160         }
7161         ret = btrfs_add_ordered_extent_dio(inode, start, block_start,
7162                                            len, block_len, type);
7163         if (ret) {
7164                 if (em) {
7165                         free_extent_map(em);
7166                         btrfs_drop_extent_cache(BTRFS_I(inode), start,
7167                                                 start + len - 1, 0);
7168                 }
7169                 em = ERR_PTR(ret);
7170         }
7171  out:
7172
7173         return em;
7174 }
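
/*
 * Note that for BTRFS_ORDERED_NOCOW no new io extent map is created
 * above: the write goes to an already allocated extent, so the mapping
 * cached in the extent map tree is still valid and only the ordered
 * extent needs to be queued.
 */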
7175
7176 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
7177                                                   u64 start, u64 len)
7178 {
7179         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7180         struct btrfs_root *root = BTRFS_I(inode)->root;
7181         struct extent_map *em;
7182         struct btrfs_key ins;
7183         u64 alloc_hint;
7184         int ret;
7185
7186         alloc_hint = get_extent_allocation_hint(inode, start, len);
7187         ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
7188                                    0, alloc_hint, &ins, 1, 1);
7189         if (ret)
7190                 return ERR_PTR(ret);
7191
7192         em = btrfs_create_dio_extent(inode, start, ins.offset, start,
7193                                      ins.objectid, ins.offset, ins.offset,
7194                                      ins.offset, BTRFS_ORDERED_REGULAR);
7195         btrfs_dec_block_group_reservations(fs_info, ins.objectid);
7196         if (IS_ERR(em))
7197                 btrfs_free_reserved_extent(fs_info, ins.objectid,
7198                                            ins.offset, 1);
7199
7200         return em;
7201 }
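
/*
 * Since btrfs_reserve_extent() is called above with min_alloc_size ==
 * sectorsize, a fragmented filesystem may satisfy a large DIO write with
 * a smaller extent than requested (e.g. 256K for a hypothetical 1M
 * write); ins.offset is then the length actually mapped, and the DIO
 * code calls back into btrfs_get_blocks_direct() for the remainder.
 */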
7202
7203 /*
7204  * returns 1 when the nocow is safe, < 0 on error, 0 if the
7205  * block must be COWed
7206  */
7207 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7208                               u64 *orig_start, u64 *orig_block_len,
7209                               u64 *ram_bytes)
7210 {
7211         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7212         struct btrfs_path *path;
7213         int ret;
7214         struct extent_buffer *leaf;
7215         struct btrfs_root *root = BTRFS_I(inode)->root;
7216         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7217         struct btrfs_file_extent_item *fi;
7218         struct btrfs_key key;
7219         u64 disk_bytenr;
7220         u64 backref_offset;
7221         u64 extent_end;
7222         u64 num_bytes;
7223         int slot;
7224         int found_type;
7225         bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
7226
7227         path = btrfs_alloc_path();
7228         if (!path)
7229                 return -ENOMEM;
7230
7231         ret = btrfs_lookup_file_extent(NULL, root, path,
7232                         btrfs_ino(BTRFS_I(inode)), offset, 0);
7233         if (ret < 0)
7234                 goto out;
7235
7236         slot = path->slots[0];
7237         if (ret == 1) {
7238                 if (slot == 0) {
7239                         /* can't find the item, must cow */
7240                         ret = 0;
7241                         goto out;
7242                 }
7243                 slot--;
7244         }
7245         ret = 0;
7246         leaf = path->nodes[0];
7247         btrfs_item_key_to_cpu(leaf, &key, slot);
7248         if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
7249             key.type != BTRFS_EXTENT_DATA_KEY) {
7250                 /* not our file or wrong item type, must cow */
7251                 goto out;
7252         }
7253
7254         if (key.offset > offset) {
7255                 /* Wrong offset, must cow */
7256                 goto out;
7257         }
7258
7259         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
7260         found_type = btrfs_file_extent_type(leaf, fi);
7261         if (found_type != BTRFS_FILE_EXTENT_REG &&
7262             found_type != BTRFS_FILE_EXTENT_PREALLOC) {
7263                 /* not a regular extent, must cow */
7264                 goto out;
7265         }
7266
7267         if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
7268                 goto out;
7269
7270         extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
7271         if (extent_end <= offset)
7272                 goto out;
7273
7274         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7275         if (disk_bytenr == 0)
7276                 goto out;
7277
7278         if (btrfs_file_extent_compression(leaf, fi) ||
7279             btrfs_file_extent_encryption(leaf, fi) ||
7280             btrfs_file_extent_other_encoding(leaf, fi))
7281                 goto out;
7282
7283         backref_offset = btrfs_file_extent_offset(leaf, fi);
7284
7285         if (orig_start) {
7286                 *orig_start = key.offset - backref_offset;
7287                 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
7288                 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7289         }
7290
7291         if (btrfs_extent_readonly(fs_info, disk_bytenr))
7292                 goto out;
7293
7294         num_bytes = min(offset + *len, extent_end) - offset;
7295         if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7296                 u64 range_end;
7297
7298                 range_end = round_up(offset + num_bytes,
7299                                      root->fs_info->sectorsize) - 1;
7300                 ret = test_range_bit(io_tree, offset, range_end,
7301                                      EXTENT_DELALLOC, 0, NULL);
7302                 if (ret) {
7303                         ret = -EAGAIN;
7304                         goto out;
7305                 }
7306         }
7307
7308         btrfs_release_path(path);
7309
7310         /*
7311          * look for other files referencing this extent, if we
7312          * find any we must cow
7313          */
7314
7315         ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
7316                                     key.offset - backref_offset, disk_bytenr);
7317         if (ret) {
7318                 ret = 0;
7319                 goto out;
7320         }
7321
7322         /*
7323          * adjust disk_bytenr and num_bytes to cover just the bytes
7324          * in this extent we are about to write.  If there
7325          * are any csums in that range we have to cow in order
7326          * to keep the csums correct
7327          */
7328         disk_bytenr += backref_offset;
7329         disk_bytenr += offset - key.offset;
7330         if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
7331                 goto out;
7332         /*
7333          * all of the above have passed, it is safe to overwrite this extent
7334          * without cow
7335          */
7336         *len = num_bytes;
7337         ret = 1;
7338 out:
7339         btrfs_free_path(path);
7340         return ret;
7341 }
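
/*
 * To summarize the checks above: can_nocow_extent() only returns 1 when
 * the extent backing [offset, offset + *len) is a regular or prealloc
 * extent (not inline), is not compressed, encrypted or otherwise
 * encoded, is not a hole (disk_bytenr != 0), does not sit in a read-only
 * block group, is not shared with another file via backrefs, and has no
 * csums over the exact byte range about to be rewritten.  For inodes
 * without NODATACOW set, only prealloc extents qualify.  If any check
 * fails the caller must fall back to COW.
 */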
7342
7343 bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7344 {
7345         struct radix_tree_root *root = &inode->i_mapping->page_tree;
7346         int found = false;
7347         void **pagep = NULL;
7348         struct page *page = NULL;
7349         int start_idx;
7350         int end_idx;
7351
7352         start_idx = start >> PAGE_SHIFT;
7353
7354         /*
7355          * end is the last byte in the last page.  end == start is legal
7356          */
7357         end_idx = end >> PAGE_SHIFT;
7358
7359         rcu_read_lock();
7360
7361         /* Most of the code in this while loop is lifted from
7362          * find_get_page.  It's been modified to begin searching from a
7363          * given index and return just the first page found in that range.
7364          * If the found idx is less than or equal to the end idx then we
7365          * know that a page exists.  If no pages are found or if those
7366          * pages are outside of the range then we're fine (yay!) */
7367         while (page == NULL &&
7368                radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) {
7369                 page = radix_tree_deref_slot(pagep);
7370                 if (unlikely(!page))
7371                         break;
7372
7373                 if (radix_tree_exception(page)) {
7374                         if (radix_tree_deref_retry(page)) {
7375                                 page = NULL;
7376                                 continue;
7377                         }
7378                         /*
7379                          * Otherwise, shmem/tmpfs must be storing a swap entry
7380                          * here as an exceptional entry: so return it without
7381                          * attempting to raise page count.
7382                          */
7383                         page = NULL;
7384                         break; /* TODO: Is this relevant for this use case? */
7385                 }
7386
7387                 if (!page_cache_get_speculative(page)) {
7388                         page = NULL;
7389                         continue;
7390                 }
7391
7392                 /*
7393                  * Has the page moved?
7394                  * This is part of the lockless pagecache protocol. See
7395                  * include/linux/pagemap.h for details.
7396                  */
7397                 if (unlikely(page != *pagep)) {
7398                         put_page(page);
7399                         page = NULL;
7400                 }
7401         }
7402
7403         if (page) {
7404                 if (page->index <= end_idx)
7405                         found = true;
7406                 put_page(page);
7407         }
7408
7409         rcu_read_unlock();
7410         return found;
7411 }
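
/*
 * Example with a 4K page size: btrfs_page_exists_in_range(inode, 8192,
 * 12287) checks page indexes 2 through 2, i.e. whether the single page
 * caching bytes [8K, 12K) is still present.  The DIO path uses this to
 * detect pages that survived (or raced with) the pagecache invalidation
 * done before a direct write.
 */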
7412
7413 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7414                               struct extent_state **cached_state, int writing)
7415 {
7416         struct btrfs_ordered_extent *ordered;
7417         int ret = 0;
7418
7419         while (1) {
7420                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7421                                  cached_state);
7422                 /*
7423                  * We're concerned with the entire range that we're going to be
7424                  * doing DIO to, so we need to make sure there are no
7425                  * ordered extents in this range.
7426                  */
7427                 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
7428                                                      lockend - lockstart + 1);
7429
7430                 /*
7431                  * We need to make sure there are no buffered pages in this
7432                  * range either, we could have raced between the invalidate in
7433                  * generic_file_direct_write and locking the extent.  The
7434                  * invalidate needs to happen so that reads after a write do not
7435                  * get stale data.
7436                  */
7437                 if (!ordered &&
7438                     (!writing ||
7439                      !btrfs_page_exists_in_range(inode, lockstart, lockend)))
7440                         break;
7441
7442                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7443                                      cached_state, GFP_NOFS);
7444
7445                 if (ordered) {
7446                         /*
7447                          * If we are doing a DIO read and the ordered extent we
7448                          * found is for a buffered write, we cannot wait for it
7449                          * to complete and retry, because if we do so we can
7450                          * deadlock with concurrent buffered writes on page
7451                          * locks. This happens only if our DIO read covers more
7452                          * than one extent map, if at this point we have already
7453                          * created an ordered extent for a previous extent map
7454                          * and locked its range in the inode's io tree, and a
7455                          * concurrent write against that previous extent map's
7456                          * range and this range has started (we unlock the
7457                          * ranges in the io tree only when the bios complete
7458                          * and buffered writes always lock pages before
7459                          * attempting to lock a range in the io tree).
7460                          */
7461                         if (writing ||
7462                             test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
7463                                 btrfs_start_ordered_extent(inode, ordered, 1);
7464                         else
7465                                 ret = -ENOTBLK;
7466                         btrfs_put_ordered_extent(ordered);
7467                 } else {
7468                         /*
7469                          * We could trigger writeback for this range (and wait
7470                          * for it to complete) and then invalidate the pages for
7471                          * this range (through invalidate_inode_pages2_range()),
7472                          * but that can lead us to a deadlock with a concurrent
7473                          * call to readpages() (a buffered read or a defrag call
7474                          * triggered a readahead) on a page lock due to an
7475                          * ordered dio extent we created before but did not have
7476                          * yet a corresponding bio submitted (hence it cannot
7477                          * complete), which makes readpages() wait for that
7478                          * ordered extent to complete while holding a lock on
7479                          * that page.
7480                          */
7481                         ret = -ENOTBLK;
7482                 }
7483
7484                 if (ret)
7485                         break;
7486
7487                 cond_resched();
7488         }
7489
7490         return ret;
7491 }
7492
7493 /* The callers of this must take lock_extent() */
7494 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
7495                                        u64 orig_start, u64 block_start,
7496                                        u64 block_len, u64 orig_block_len,
7497                                        u64 ram_bytes, int compress_type,
7498                                        int type)
7499 {
7500         struct extent_map_tree *em_tree;
7501         struct extent_map *em;
7502         struct btrfs_root *root = BTRFS_I(inode)->root;
7503         int ret;
7504
7505         ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7506                type == BTRFS_ORDERED_COMPRESSED ||
7507                type == BTRFS_ORDERED_NOCOW ||
7508                type == BTRFS_ORDERED_REGULAR);
7509
7510         em_tree = &BTRFS_I(inode)->extent_tree;
7511         em = alloc_extent_map();
7512         if (!em)
7513                 return ERR_PTR(-ENOMEM);
7514
7515         em->start = start;
7516         em->orig_start = orig_start;
7517         em->len = len;
7518         em->block_len = block_len;
7519         em->block_start = block_start;
7520         em->bdev = root->fs_info->fs_devices->latest_bdev;
7521         em->orig_block_len = orig_block_len;
7522         em->ram_bytes = ram_bytes;
7523         em->generation = -1;
7524         set_bit(EXTENT_FLAG_PINNED, &em->flags);
7525         if (type == BTRFS_ORDERED_PREALLOC) {
7526                 set_bit(EXTENT_FLAG_FILLING, &em->flags);
7527         } else if (type == BTRFS_ORDERED_COMPRESSED) {
7528                 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
7529                 em->compress_type = compress_type;
7530         }
7531
7532         do {
7533                 btrfs_drop_extent_cache(BTRFS_I(inode), em->start,
7534                                 em->start + em->len - 1, 0);
7535                 write_lock(&em_tree->lock);
7536                 ret = add_extent_mapping(em_tree, em, 1);
7537                 write_unlock(&em_tree->lock);
7538                 /*
7539                  * The caller has taken lock_extent(), so no one else could
7540                  * race with us to add this em.
7541                  */
7542         } while (ret == -EEXIST);
7543
7544         if (ret) {
7545                 free_extent_map(em);
7546                 return ERR_PTR(ret);
7547         }
7548
7549         /* em has 2 refs now; the caller needs to do free_extent_map once. */
7550         return em;
7551 }
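
/*
 * Of the two references mentioned above, one belongs to the extent map
 * tree (dropped when the em is removed or replaced) and one to the
 * caller, which must be released with free_extent_map() once the caller
 * is done with the mapping.
 */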
7552
7553 static void adjust_dio_outstanding_extents(struct inode *inode,
7554                                            struct btrfs_dio_data *dio_data,
7555                                            const u64 len)
7556 {
7557         unsigned num_extents = count_max_extents(len);
7558
7559         /*
7560          * If we have an outstanding_extents count still set then we're
7561          * within our reservation, otherwise we need to adjust our inode
7562          * counter appropriately.
7563          */
7564         if (dio_data->outstanding_extents >= num_extents) {
7565                 dio_data->outstanding_extents -= num_extents;
7566         } else {
7567                 /*
7568                  * If the dio write length has been split because there was
7569                  * no large enough contiguous space, we need to compensate
7570                  * our inode counter appropriately.
7571                  */
7572                 u64 num_needed = num_extents - dio_data->outstanding_extents;
7573
7574                 spin_lock(&BTRFS_I(inode)->lock);
7575                 BTRFS_I(inode)->outstanding_extents += num_needed;
7576                 spin_unlock(&BTRFS_I(inode)->lock);
7577         }
7578 }
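
/*
 * Worked example (sizes hypothetical): count_max_extents() counts
 * BTRFS_MAX_EXTENT_SIZE sized pieces, so a 4M write needs one extent.
 * If btrfs_direct_IO() reserved one outstanding extent for it,
 * dio_data->outstanding_extents drops from 1 to 0 here; but if the write
 * got split into more pieces than were reserved, the difference is added
 * to the inode's own outstanding_extents counter instead.
 */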
7579
7580 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7581                                    struct buffer_head *bh_result, int create)
7582 {
7583         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7584         struct extent_map *em;
7585         struct extent_state *cached_state = NULL;
7586         struct btrfs_dio_data *dio_data = NULL;
7587         u64 start = iblock << inode->i_blkbits;
7588         u64 lockstart, lockend;
7589         u64 len = bh_result->b_size;
7590         int unlock_bits = EXTENT_LOCKED;
7591         int ret = 0;
7592
7593         if (create)
7594                 unlock_bits |= EXTENT_DIRTY;
7595         else
7596                 len = min_t(u64, len, fs_info->sectorsize);
7597
7598         lockstart = start;
7599         lockend = start + len - 1;
7600
7601         if (current->journal_info) {
7602                 /*
7603                  * Need to pull our outstanding extents and set
7604                  * journal_info to NULL so that anything that needs to
7605                  * check if there's a transaction doesn't get confused.
7606                  */
7607                 dio_data = current->journal_info;
7608                 current->journal_info = NULL;
7609         }
7610
7611         /*
7612          * If this errors out it's because we couldn't invalidate pagecache for
7613          * this range and we need to fall back to buffered IO.
7614          */
7615         if (lock_extent_direct(inode, lockstart, lockend, &cached_state,
7616                                create)) {
7617                 ret = -ENOTBLK;
7618                 goto err;
7619         }
7620
7621         em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
7622         if (IS_ERR(em)) {
7623                 ret = PTR_ERR(em);
7624                 goto unlock_err;
7625         }
7626
7627          * Ok, for INLINE and COMPRESSED extents we need to fall back to
7628          * buffered io.  INLINE is special, and we could probably kludge it
7629          * in here, but it's still buffered so for safety let's just fall
7630          * back to the generic buffered path.
7631          * buffered path.
7632          *
7633          * For COMPRESSED we _have_ to read the entire extent in so we can
7634          * decompress it, so there will be buffering required no matter what we
7635          * do, so go ahead and fallback to buffered.
7636          *
7637          * We return -ENOTBLK because that's what makes DIO go ahead and go back
7638          * to buffered IO.  Don't blame me, this is the price we pay for using
7639          * the generic code.
7640          */
7641         if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7642             em->block_start == EXTENT_MAP_INLINE) {
7643                 free_extent_map(em);
7644                 ret = -ENOTBLK;
7645                 goto unlock_err;
7646         }
7647
7648         /* Just a good old fashioned hole, return */
7649         if (!create && (em->block_start == EXTENT_MAP_HOLE ||
7650                         test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
7651                 free_extent_map(em);
7652                 goto unlock_err;
7653         }
7654
7655         /*
7656          * We don't allocate a new extent in the following cases
7657          *
7658          * 1) The inode is marked as NODATACOW.  In this case we'll just use the
7659          * existing extent.
7660          * 2) The extent is marked as PREALLOC.  We're good to go here and can
7661          * just use the extent.
7662          *
7663          */
7664         if (!create) {
7665                 len = min(len, em->len - (start - em->start));
7666                 lockstart = start + len;
7667                 goto unlock;
7668         }
7669
7670         if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7671             ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7672              em->block_start != EXTENT_MAP_HOLE)) {
7673                 int type;
7674                 u64 block_start, orig_start, orig_block_len, ram_bytes;
7675
7676                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7677                         type = BTRFS_ORDERED_PREALLOC;
7678                 else
7679                         type = BTRFS_ORDERED_NOCOW;
7680                 len = min(len, em->len - (start - em->start));
7681                 block_start = em->block_start + (start - em->start);
7682
7683                 if (can_nocow_extent(inode, start, &len, &orig_start,
7684                                      &orig_block_len, &ram_bytes) == 1 &&
7685                     btrfs_inc_nocow_writers(fs_info, block_start)) {
7686                         struct extent_map *em2;
7687
7688                         em2 = btrfs_create_dio_extent(inode, start, len,
7689                                                       orig_start, block_start,
7690                                                       len, orig_block_len,
7691                                                       ram_bytes, type);
7692                         btrfs_dec_nocow_writers(fs_info, block_start);
7693                         if (type == BTRFS_ORDERED_PREALLOC) {
7694                                 free_extent_map(em);
7695                                 em = em2;
7696                         }
7697                         if (em2 && IS_ERR(em2)) {
7698                                 ret = PTR_ERR(em2);
7699                                 goto unlock_err;
7700                         }
7701                         /*
7702                          * For an inode marked NODATACOW or an extent marked
7703                          * PREALLOC, use the existing extent, so we do not
7704                          * need to adjust btrfs_space_info's bytes_may_use.
7705                          */
7706                         btrfs_free_reserved_data_space_noquota(inode,
7707                                         start, len);
7708                         goto unlock;
7709                 }
7710         }
7711
7712         /*
7713          * this will cow the extent, reset the len in case we changed
7714          * it above
7715          */
7716         len = bh_result->b_size;
7717         free_extent_map(em);
7718         em = btrfs_new_extent_direct(inode, start, len);
7719         if (IS_ERR(em)) {
7720                 ret = PTR_ERR(em);
7721                 goto unlock_err;
7722         }
7723         len = min(len, em->len - (start - em->start));
7724 unlock:
7725         bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7726                 inode->i_blkbits;
7727         bh_result->b_size = len;
7728         bh_result->b_bdev = em->bdev;
7729         set_buffer_mapped(bh_result);
7730         if (create) {
7731                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7732                         set_buffer_new(bh_result);
7733
7734                 /*
7735                  * Need to update the i_size under the extent lock so buffered
7736                  * readers will get the updated i_size when we unlock.
7737                  */
7738                 if (!dio_data->overwrite && start + len > i_size_read(inode))
7739                         i_size_write(inode, start + len);
7740
7741                 adjust_dio_outstanding_extents(inode, dio_data, len);
7742                 WARN_ON(dio_data->reserve < len);
7743                 dio_data->reserve -= len;
7744                 dio_data->unsubmitted_oe_range_end = start + len;
7745                 current->journal_info = dio_data;
7746         }
7747
7748         /*
7749          * In the case of write we need to clear and unlock the entire range,
7750          * in the case of read we need to unlock only the end area that we
7751          * aren't using if there is any left over space.
7752          */
7753         if (lockstart < lockend) {
7754                 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
7755                                  lockend, unlock_bits, 1, 0,
7756                                  &cached_state, GFP_NOFS);
7757         } else {
7758                 free_extent_state(cached_state);
7759         }
7760
7761         free_extent_map(em);
7762
7763         return 0;
7764
7765 unlock_err:
7766         clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7767                          unlock_bits, 1, 0, &cached_state, GFP_NOFS);
7768 err:
7769         if (dio_data)
7770                 current->journal_info = dio_data;
7771         /*
7772          * Compensate the delalloc release we do in btrfs_direct_IO() when we
7773          * write less data than expected, so that we don't underflow our inode's
7774          * outstanding extents counter.
7775          */
7776         if (create && dio_data)
7777                 adjust_dio_outstanding_extents(inode, dio_data, len);
7778
7779         return ret;
7780 }
7781
7782 static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
7783                                         int mirror_num)
7784 {
7785         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7786         int ret;
7787
7788         BUG_ON(bio_op(bio) == REQ_OP_WRITE);
7789
7790         bio_get(bio);
7791
7792         ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
7793         if (ret)
7794                 goto err;
7795
7796         ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
7797 err:
7798         bio_put(bio);
7799         return ret;
7800 }
7801
7802 static int btrfs_check_dio_repairable(struct inode *inode,
7803                                       struct bio *failed_bio,
7804                                       struct io_failure_record *failrec,
7805                                       int failed_mirror)
7806 {
7807         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7808         int num_copies;
7809
7810         num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
7811         if (num_copies == 1) {
7812                 /*
7813                  * we only have a single copy of the data, so don't bother with
7814                  * all the retry and error correction code that follows.  No
7815                  * matter what the error is, it is very likely to persist.
7816                  */
7817                 btrfs_debug(fs_info,
7818                         "Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
7819                         num_copies, failrec->this_mirror, failed_mirror);
7820                 return 0;
7821         }
7822
7823         failrec->failed_mirror = failed_mirror;
7824         failrec->this_mirror++;
7825         if (failrec->this_mirror == failed_mirror)
7826                 failrec->this_mirror++;
7827
7828         if (failrec->this_mirror > num_copies) {
7829                 btrfs_debug(fs_info,
7830                         "Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
7831                         num_copies, failrec->this_mirror, failed_mirror);
7832                 return 0;
7833         }
7834
7835         return 1;
7836 }
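
/*
 * Mirror rotation example, assuming this_mirror starts at 0 in a fresh
 * io_failure_record: on RAID1 (num_copies == 2) with failed_mirror == 1,
 * the first call bumps this_mirror to 1, skips over the failed mirror to
 * 2, and allows a retry from mirror 2.  If that read fails as well, the
 * next call pushes this_mirror past num_copies and the error is treated
 * as permanent.
 */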
7837
7838 static int dio_read_error(struct inode *inode, struct bio *failed_bio,
7839                         struct page *page, unsigned int pgoff,
7840                         u64 start, u64 end, int failed_mirror,
7841                         bio_end_io_t *repair_endio, void *repair_arg)
7842 {
7843         struct io_failure_record *failrec;
7844         struct bio *bio;
7845         int isector;
7846         int read_mode = 0;
7847         int ret;
7848
7849         BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
7850
7851         ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
7852         if (ret)
7853                 return ret;
7854
7855         ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
7856                                          failed_mirror);
7857         if (!ret) {
7858                 free_io_failure(BTRFS_I(inode), failrec);
7859                 return -EIO;
7860         }
7861
7862         if ((failed_bio->bi_vcnt > 1)
7863                 || (failed_bio->bi_io_vec->bv_len
7864                         > btrfs_inode_sectorsize(inode)))
7865                 read_mode |= REQ_FAILFAST_DEV;
7866
7867         isector = start - btrfs_io_bio(failed_bio)->logical;
7868         isector >>= inode->i_sb->s_blocksize_bits;
7869         bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
7870                                 pgoff, isector, repair_endio, repair_arg);
7871         if (!bio) {
7872                 free_io_failure(BTRFS_I(inode), failrec);
7873                 return -EIO;
7874         }
7875         bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
7876
7877         btrfs_debug(BTRFS_I(inode)->root->fs_info,
7878                     "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d",
7879                     read_mode, failrec->this_mirror, failrec->in_validation);
7880
7881         ret = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
7882         if (ret) {
7883                 free_io_failure(BTRFS_I(inode), failrec);
7884                 bio_put(bio);
7885         }
7886
7887         return ret;
7888 }
7889
7890 struct btrfs_retry_complete {
7891         struct completion done;
7892         struct inode *inode;
7893         u64 start;
7894         int uptodate;
7895 };
7896
7897 static void btrfs_retry_endio_nocsum(struct bio *bio)
7898 {
7899         struct btrfs_retry_complete *done = bio->bi_private;
7900         struct inode *inode;
7901         struct bio_vec *bvec;
7902         int i;
7903
7904         if (bio->bi_error)
7905                 goto end;
7906
7907         ASSERT(bio->bi_vcnt == 1);
7908         inode = bio->bi_io_vec->bv_page->mapping->host;
7909         ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
7910
7911         done->uptodate = 1;
7912         bio_for_each_segment_all(bvec, bio, i)
7913                 clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, 0);
7914 end:
7915         complete(&done->done);
7916         bio_put(bio);
7917 }
7918
7919 static int __btrfs_correct_data_nocsum(struct inode *inode,
7920                                        struct btrfs_io_bio *io_bio)
7921 {
7922         struct btrfs_fs_info *fs_info;
7923         struct bio_vec *bvec;
7924         struct btrfs_retry_complete done;
7925         u64 start;
7926         unsigned int pgoff;
7927         u32 sectorsize;
7928         int nr_sectors;
7929         int i;
7930         int ret;
7931
7932         fs_info = BTRFS_I(inode)->root->fs_info;
7933         sectorsize = fs_info->sectorsize;
7934
7935         start = io_bio->logical;
7936         done.inode = inode;
7937
7938         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
7939                 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
7940                 pgoff = bvec->bv_offset;
7941
7942 next_block_or_try_again:
7943                 done.uptodate = 0;
7944                 done.start = start;
7945                 init_completion(&done.done);
7946
7947                 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page,
7948                                 pgoff, start, start + sectorsize - 1,
7949                                 io_bio->mirror_num,
7950                                 btrfs_retry_endio_nocsum, &done);
7951                 if (ret)
7952                         return ret;
7953
7954                 wait_for_completion(&done.done);
7955
7956                 if (!done.uptodate) {
7957                         /* We might have another mirror, so try again */
7958                         goto next_block_or_try_again;
7959                 }
7960
7961                 start += sectorsize;
7962
7963                 if (--nr_sectors) {
7964                         pgoff += sectorsize;
7965                         goto next_block_or_try_again;
7966                 }
7967         }
7968
7969         return 0;
7970 }
7971
7972 static void btrfs_retry_endio(struct bio *bio)
7973 {
7974         struct btrfs_retry_complete *done = bio->bi_private;
7975         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7976         struct inode *inode;
7977         struct bio_vec *bvec;
7978         u64 start;
7979         int uptodate;
7980         int ret;
7981         int i;
7982
7983         if (bio->bi_error)
7984                 goto end;
7985
7986         uptodate = 1;
7987
7988         start = done->start;
7989
7990         ASSERT(bio->bi_vcnt == 1);
7991         inode = bio->bi_io_vec->bv_page->mapping->host;
7992         ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
7993
7994         bio_for_each_segment_all(bvec, bio, i) {
7995                 ret = __readpage_endio_check(done->inode, io_bio, i,
7996                                         bvec->bv_page, bvec->bv_offset,
7997                                         done->start, bvec->bv_len);
7998                 if (!ret)
7999                         clean_io_failure(BTRFS_I(done->inode), done->start,
8000                                         bvec->bv_page, bvec->bv_offset);
8001                 else
8002                         uptodate = 0;
8003         }
8004
8005         done->uptodate = uptodate;
8006 end:
8007         complete(&done->done);
8008         bio_put(bio);
8009 }
8010
8011 static int __btrfs_subio_endio_read(struct inode *inode,
8012                                     struct btrfs_io_bio *io_bio, int err)
8013 {
8014         struct btrfs_fs_info *fs_info;
8015         struct bio_vec *bvec;
8016         struct btrfs_retry_complete done;
8017         u64 start;
8018         u64 offset = 0;
8019         u32 sectorsize;
8020         int nr_sectors;
8021         unsigned int pgoff;
8022         int csum_pos;
8023         int i;
8024         int ret;
8025
8026         fs_info = BTRFS_I(inode)->root->fs_info;
8027         sectorsize = fs_info->sectorsize;
8028
8029         err = 0;        /* errors are re-evaluated per sector by the csum checks below */
8030         start = io_bio->logical;
8031         done.inode = inode;
8032
8033         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
8034                 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
8035
8036                 pgoff = bvec->bv_offset;
8037 next_block:
8038                 csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset);
8039                 ret = __readpage_endio_check(inode, io_bio, csum_pos,
8040                                         bvec->bv_page, pgoff, start,
8041                                         sectorsize);
8042                 if (likely(!ret))
8043                         goto next;
8044 try_again:
8045                 done.uptodate = 0;
8046                 done.start = start;
8047                 init_completion(&done.done);
8048
8049                 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page,
8050                                 pgoff, start, start + sectorsize - 1,
8051                                 io_bio->mirror_num,
8052                                 btrfs_retry_endio, &done);
8053                 if (ret) {
8054                         err = ret;
8055                         goto next;
8056                 }
8057
8058                 wait_for_completion(&done.done);
8059
8060                 if (!done.uptodate) {
8061                         /* We might have another mirror, so try again */
8062                         goto try_again;
8063                 }
8064 next:
8065                 offset += sectorsize;
8066                 start += sectorsize;
8067
8068                 ASSERT(nr_sectors);
8069
8070                 if (--nr_sectors) {
8071                         pgoff += sectorsize;
8072                         goto next_block;
8073                 }
8074         }
8075
8076         return err;
8077 }
8078
8079 static int btrfs_subio_endio_read(struct inode *inode,
8080                                   struct btrfs_io_bio *io_bio, int err)
8081 {
8082         bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
8083
8084         if (skip_csum) {
8085                 if (unlikely(err))
8086                         return __btrfs_correct_data_nocsum(inode, io_bio);
8087                 else
8088                         return 0;
8089         } else {
8090                 return __btrfs_subio_endio_read(inode, io_bio, err);
8091         }
8092 }
8093
8094 static void btrfs_endio_direct_read(struct bio *bio)
8095 {
8096         struct btrfs_dio_private *dip = bio->bi_private;
8097         struct inode *inode = dip->inode;
8098         struct bio *dio_bio;
8099         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8100         int err = bio->bi_error;
8101
8102         if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
8103                 err = btrfs_subio_endio_read(inode, io_bio, err);
8104
8105         unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
8106                       dip->logical_offset + dip->bytes - 1);
8107         dio_bio = dip->dio_bio;
8108
8109         kfree(dip);
8110
8111         dio_bio->bi_error = err;
8112         dio_end_io(dio_bio, err);
8113
8114         if (io_bio->end_io)
8115                 io_bio->end_io(io_bio, err);
8116         bio_put(bio);
8117 }
8118
8119 static void btrfs_endio_direct_write_update_ordered(struct inode *inode,
8120                                                     const u64 offset,
8121                                                     const u64 bytes,
8122                                                     const int uptodate)
8123 {
8124         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8125         struct btrfs_ordered_extent *ordered = NULL;
8126         u64 ordered_offset = offset;
8127         u64 ordered_bytes = bytes;
8128         int ret;
8129
8130 again:
8131         ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
8132                                                    &ordered_offset,
8133                                                    ordered_bytes,
8134                                                    uptodate);
8135         if (!ret)
8136                 goto out_test;
8137
8138         btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
8139                         finish_ordered_fn, NULL, NULL);
8140         btrfs_queue_work(fs_info->endio_write_workers, &ordered->work);
8141 out_test:
8142         /*
8143          * our bio might span multiple ordered extents.  If we haven't
8144          * completed the accounting for the whole dio, go back and try again
8145          */
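        /*
         * E.g. a 1MiB dio write at offset 0 spanning two ordered extents,
         * [0, 512K) and [512K, 1M): the first pass completes the first
         * extent and advances ordered_offset to 512K, which is still less
         * than offset + bytes, so we loop once more to account the second
         * extent before returning.
         */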
8146         if (ordered_offset < offset + bytes) {
8147                 ordered_bytes = offset + bytes - ordered_offset;
8148                 ordered = NULL;
8149                 goto again;
8150         }
8151 }
8152
8153 static void btrfs_endio_direct_write(struct bio *bio)
8154 {
8155         struct btrfs_dio_private *dip = bio->bi_private;
8156         struct bio *dio_bio = dip->dio_bio;
8157
8158         btrfs_endio_direct_write_update_ordered(dip->inode,
8159                                                 dip->logical_offset,
8160                                                 dip->bytes,
8161                                                 !bio->bi_error);
8162
8163         kfree(dip);
8164
8165         dio_bio->bi_error = bio->bi_error;
8166         dio_end_io(dio_bio, bio->bi_error);
8167         bio_put(bio);
8168 }
8169
8170 static int __btrfs_submit_bio_start_direct_io(struct inode *inode,
8171                                     struct bio *bio, int mirror_num,
8172                                     unsigned long bio_flags, u64 offset)
8173 {
8174         int ret;
8175         ret = btrfs_csum_one_bio(inode, bio, offset, 1);
8176         BUG_ON(ret); /* -ENOMEM */
8177         return 0;
8178 }
8179
8180 static void btrfs_end_dio_bio(struct bio *bio)
8181 {
8182         struct btrfs_dio_private *dip = bio->bi_private;
8183         int err = bio->bi_error;
8184
8185         if (err)
8186                 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
8187                            "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
8188                            btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
8189                            bio->bi_opf,
8190                            (unsigned long long)bio->bi_iter.bi_sector,
8191                            bio->bi_iter.bi_size, err);
8192
8193         if (dip->subio_endio)
8194                 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
8195
8196         if (err) {
8197                 dip->errors = 1;
8198
8199                 /*
8200                  * Before the atomic counter reaches zero, we must make
8201                  * sure dip->errors is perceived to be set.
8202                  */
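                /*
                 * I.e. whichever bio completion performs the final
                 * decrement of dip->pending_bios below must observe
                 * dip->errors == 1, so it fails dip->orig_bio instead
                 * of completing it successfully.
                 */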
8203                 smp_mb__before_atomic();
8204         }
8205
8206         /* if there are more bios still pending for this dio, just exit */
8207         if (!atomic_dec_and_test(&dip->pending_bios))
8208                 goto out;
8209
8210         if (dip->errors) {
8211                 bio_io_error(dip->orig_bio);
8212         } else {
8213                 dip->dio_bio->bi_error = 0;
8214                 bio_endio(dip->orig_bio);
8215         }
8216 out:
8217         bio_put(bio);
8218 }
8219
8220 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
8221                                        u64 first_sector, gfp_t gfp_flags)
8222 {
8223         struct bio *bio;
8224         bio = btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags);
8225         if (bio)
8226                 bio_associate_current(bio);
8227         return bio;
8228 }
8229
8230 static inline int btrfs_lookup_and_bind_dio_csum(struct inode *inode,
8231                                                  struct btrfs_dio_private *dip,
8232                                                  struct bio *bio,
8233                                                  u64 file_offset)
8234 {
8235         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8236         struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
8237         int ret;
8238
8239         /*
8240          * We load all the csum data we need when we submit
8241          * the first bio to reduce the csum tree search and
8242          * contention.
8243          */
8244         if (dip->logical_offset == file_offset) {
8245                 ret = btrfs_lookup_bio_sums_dio(inode, dip->orig_bio,
8246                                                 file_offset);
8247                 if (ret)
8248                         return ret;
8249         }
8250
8251         if (bio == dip->orig_bio)
8252                 return 0;
8253
8254         file_offset -= dip->logical_offset;
8255         file_offset >>= inode->i_sb->s_blocksize_bits;
8256         io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
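        /*
         * E.g. with a 4KiB sectorsize (s_blocksize_bits == 12) and 4-byte
         * crc32c csums, a sub-bio starting 64KiB into the dio covers block
         * index 65536 >> 12 == 16, so its csums begin 16 u32s (64 bytes)
         * into the csum array of the original bio.
         */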
8257
8258         return 0;
8259 }
8260
8261 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
8262                                          u64 file_offset, int skip_sum,
8263                                          int async_submit)
8264 {
8265         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8266         struct btrfs_dio_private *dip = bio->bi_private;
8267         bool write = bio_op(bio) == REQ_OP_WRITE;
8268         int ret;
8269
8270         if (async_submit)
8271                 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
8272
8273         bio_get(bio);
8274
8275         if (!write) {
8276                 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
8277                 if (ret)
8278                         goto err;
8279         }
8280
8281         if (skip_sum)
8282                 goto map;
8283
8284         if (write && async_submit) {
8285                 ret = btrfs_wq_submit_bio(fs_info, inode, bio, 0, 0,
8286                                           file_offset,
8287                                           __btrfs_submit_bio_start_direct_io,
8288                                           __btrfs_submit_bio_done);
8289                 goto err;
8290         } else if (write) {
8291                 /*
8292                  * If we aren't doing async submit, calculate the csum of the
8293                  * bio now.
8294                  */
8295                 ret = btrfs_csum_one_bio(inode, bio, file_offset, 1);
8296                 if (ret)
8297                         goto err;
8298         } else {
8299                 ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio,
8300                                                      file_offset);
8301                 if (ret)
8302                         goto err;
8303         }
8304 map:
8305         ret = btrfs_map_bio(fs_info, bio, 0, async_submit);
8306 err:
8307         bio_put(bio);
8308         return ret;
8309 }
8310
8311 static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
8312                                     int skip_sum)
8313 {
8314         struct inode *inode = dip->inode;
8315         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8316         struct btrfs_root *root = BTRFS_I(inode)->root;
8317         struct bio *bio;
8318         struct bio *orig_bio = dip->orig_bio;
8319         struct bio_vec *bvec;
8320         u64 start_sector = orig_bio->bi_iter.bi_sector;
8321         u64 file_offset = dip->logical_offset;
8322         u64 submit_len = 0;
8323         u64 map_length;
8324         u32 blocksize = fs_info->sectorsize;
8325         int async_submit = 0;
8326         int nr_sectors;
8327         int ret;
8328         int i, j;
8329
8330         map_length = orig_bio->bi_iter.bi_size;
8331         ret = btrfs_map_block(fs_info, btrfs_op(orig_bio), start_sector << 9,
8332                               &map_length, NULL, 0);
8333         if (ret)
8334                 return -EIO;
8335
8336         if (map_length >= orig_bio->bi_iter.bi_size) {
8337                 bio = orig_bio;
8338                 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
8339                 goto submit;
8340         }
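        /*
         * Otherwise the bio crosses a stripe boundary and has to be split.
         * E.g. for a 1MiB dio on a raid0 chunk with 64KiB stripes, the
         * btrfs_map_block() call above returns map_length == 64KiB (the
         * room left in the current stripe), so below we build and submit
         * a chain of sub-bios, remapping whenever map_length is used up.
         */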
8341
8342         /* async crcs make it difficult to collect full stripe writes. */
8343         if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK)
8344                 async_submit = 0;
8345         else
8346                 async_submit = 1;
8347
8348         bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
8349         if (!bio)
8350                 return -ENOMEM;
8351
8352         bio->bi_opf = orig_bio->bi_opf;
8353         bio->bi_private = dip;
8354         bio->bi_end_io = btrfs_end_dio_bio;
8355         btrfs_io_bio(bio)->logical = file_offset;
8356         atomic_inc(&dip->pending_bios);
8357
8358         bio_for_each_segment_all(bvec, orig_bio, j) {
8359                 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
8360                 i = 0;
8361 next_block:
8362                 if (unlikely(map_length < submit_len + blocksize ||
8363                     bio_add_page(bio, bvec->bv_page, blocksize,
8364                             bvec->bv_offset + (i * blocksize)) < blocksize)) {
8365                         /*
8366                          * Increment the count before we submit the bio so
8367                          * we know the end IO handler won't run before we
8368                          * finish incrementing it.  Otherwise the dip might
8369                          * get freed before we're done setting it up.
8370                          */
8371                         atomic_inc(&dip->pending_bios);
8372                         ret = __btrfs_submit_dio_bio(bio, inode,
8373                                                      file_offset, skip_sum,
8374                                                      async_submit);
8375                         if (ret) {
8376                                 bio_put(bio);
8377                                 atomic_dec(&dip->pending_bios);
8378                                 goto out_err;
8379                         }
8380
8381                         start_sector += submit_len >> 9;
8382                         file_offset += submit_len;
8383
8384                         submit_len = 0;
8385
8386                         bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
8387                                                   start_sector, GFP_NOFS);
8388                         if (!bio)
8389                                 goto out_err;
8390                         bio->bi_opf = orig_bio->bi_opf;
8391                         bio->bi_private = dip;
8392                         bio->bi_end_io = btrfs_end_dio_bio;
8393                         btrfs_io_bio(bio)->logical = file_offset;
8394
8395                         map_length = orig_bio->bi_iter.bi_size;
8396                         ret = btrfs_map_block(fs_info, btrfs_op(orig_bio),
8397                                               start_sector << 9,
8398                                               &map_length, NULL, 0);
8399                         if (ret) {
8400                                 bio_put(bio);
8401                                 goto out_err;
8402                         }
8403
8404                         goto next_block;
8405                 } else {
8406                         submit_len += blocksize;
8407                         if (--nr_sectors) {
8408                                 i++;
8409                                 goto next_block;
8410                         }
8411                 }
8412         }
8413
8414 submit:
8415         ret = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
8416                                      async_submit);
8417         if (!ret)
8418                 return 0;
8419
8420         bio_put(bio);
8421 out_err:
8422         dip->errors = 1;
8423         /*
8424          * Before the atomic counter reaches zero, we must
8425          * make sure dip->errors is perceived to be set.
8426          */
8427         smp_mb__before_atomic();
8428         if (atomic_dec_and_test(&dip->pending_bios))
8429                 bio_io_error(dip->orig_bio);
8430
8431         /* the end_io handlers above deal with the error, so we needn't return it */
8432         return 0;
8433 }
8434
8435 static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
8436                                 loff_t file_offset)
8437 {
8438         struct btrfs_dio_private *dip = NULL;
8439         struct bio *io_bio = NULL;
8440         struct btrfs_io_bio *btrfs_bio;
8441         int skip_sum;
8442         bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
8443         int ret = 0;
8444
8445         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
8446
8447         io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
8448         if (!io_bio) {
8449                 ret = -ENOMEM;
8450                 goto free_ordered;
8451         }
8452
8453         dip = kzalloc(sizeof(*dip), GFP_NOFS);
8454         if (!dip) {
8455                 ret = -ENOMEM;
8456                 goto free_ordered;
8457         }
8458
8459         dip->private = dio_bio->bi_private;
8460         dip->inode = inode;
8461         dip->logical_offset = file_offset;
8462         dip->bytes = dio_bio->bi_iter.bi_size;
8463         dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
8464         io_bio->bi_private = dip;
8465         dip->orig_bio = io_bio;
8466         dip->dio_bio = dio_bio;
8467         atomic_set(&dip->pending_bios, 0);
8468         btrfs_bio = btrfs_io_bio(io_bio);
8469         btrfs_bio->logical = file_offset;
8470
8471         if (write) {
8472                 io_bio->bi_end_io = btrfs_endio_direct_write;
8473         } else {
8474                 io_bio->bi_end_io = btrfs_endio_direct_read;
8475                 dip->subio_endio = btrfs_subio_endio_read;
8476         }
8477
8478         /*
8479          * Reset the range for unsubmitted ordered extents (to a 0 length range)
8480          * even if we fail to submit a bio, because in that case we do the
8481          * corresponding error handling below and it must not be done a second
8482          * time by btrfs_direct_IO().
8483          */
8484         if (write) {
8485                 struct btrfs_dio_data *dio_data = current->journal_info;
8486
8487                 dio_data->unsubmitted_oe_range_end = dip->logical_offset +
8488                         dip->bytes;
8489                 dio_data->unsubmitted_oe_range_start =
8490                         dio_data->unsubmitted_oe_range_end;
8491         }
8492
8493         ret = btrfs_submit_direct_hook(dip, skip_sum);
8494         if (!ret)
8495                 return;
8496
8497         if (btrfs_bio->end_io)
8498                 btrfs_bio->end_io(btrfs_bio, ret);
8499
8500 free_ordered:
8501         /*
8502          * If we arrived here, it means we either failed to submit the dip,
8503          * failed to clone the dio_bio, or failed to allocate the dip. If we
8504          * cloned the dio_bio and allocated the dip, we can just
8505          * call bio_endio against our io_bio so that we get proper resource
8506          * cleanup if we fail to submit the dip, otherwise, we must do the
8507          * same as btrfs_endio_direct_[write|read] because we can't call these
8508          * callbacks - they require an allocated dip and a clone of dio_bio.
8509          */
8510         if (io_bio && dip) {
8511                 io_bio->bi_error = -EIO;
8512                 bio_endio(io_bio);
8513                 /*
8514                  * The end io callbacks free our dip, do the final put on io_bio
8515                  * and all the cleanup and final put for dio_bio (through
8516                  * dio_end_io()).
8517                  */
8518                 dip = NULL;
8519                 io_bio = NULL;
8520         } else {
8521                 if (write)
8522                         btrfs_endio_direct_write_update_ordered(inode,
8523                                                 file_offset,
8524                                                 dio_bio->bi_iter.bi_size,
8525                                                 0);
8526                 else
8527                         unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8528                               file_offset + dio_bio->bi_iter.bi_size - 1);
8529
8530                 dio_bio->bi_error = -EIO;
8531                 /*
8532                  * Releases and cleans up our dio_bio, no need to bio_put()
8533                  * nor bio_endio()/bio_io_error() against dio_bio.
8534                  */
8535                 dio_end_io(dio_bio, ret);
8536         }
8537         if (io_bio)
8538                 bio_put(io_bio);
8539         kfree(dip);
8540 }
8541
8542 static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
8543                                struct kiocb *iocb,
8544                                const struct iov_iter *iter, loff_t offset)
8545 {
8546         int seg;
8547         int i;
8548         unsigned int blocksize_mask = fs_info->sectorsize - 1;
8549         ssize_t retval = -EINVAL;
8550
8551         if (offset & blocksize_mask)
8552                 goto out;
8553
8554         if (iov_iter_alignment(iter) & blocksize_mask)
8555                 goto out;
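        /*
         * E.g. with a 4KiB sectorsize, a dio at file offset 512, or one
         * with a user buffer that isn't 4KiB-aligned, fails the checks
         * above; the -EINVAL makes btrfs_direct_IO() return 0 and the
         * caller falls back to buffered I/O.
         */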
8556
8557         /* If this is a write, or not a user-space iovec, no more checks are needed */
8558         if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter))
8559                 return 0;
8560         /*
8561          * Check to make sure we don't have duplicate iov_base's in this
8562          * iovec; if we do, return -EINVAL, otherwise we'd get csum errors
8563          * when reading back.
8564          */
8565         for (seg = 0; seg < iter->nr_segs; seg++) {
8566                 for (i = seg + 1; i < iter->nr_segs; i++) {
8567                         if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
8568                                 goto out;
8569                 }
8570         }
8571         retval = 0;
8572 out:
8573         return retval;
8574 }
8575
8576 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
8577 {
8578         struct file *file = iocb->ki_filp;
8579         struct inode *inode = file->f_mapping->host;
8580         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8581         struct btrfs_dio_data dio_data = { 0 };
8582         loff_t offset = iocb->ki_pos;
8583         size_t count = 0;
8584         int flags = 0;
8585         bool wakeup = true;
8586         bool relock = false;
8587         ssize_t ret;
8588
8589         if (check_direct_IO(fs_info, iocb, iter, offset))
8590                 return 0;
8591
8592         inode_dio_begin(inode);
8593         smp_mb__after_atomic();
8594
8595         /*
8596          * The generic stuff only does filemap_write_and_wait_range, which
8597          * isn't enough if we've written compressed pages to this area, so
8598          * we need to flush the dirty pages again to make absolutely sure
8599          * that any outstanding dirty pages are on disk.
8600          */
8601         count = iov_iter_count(iter);
8602         if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8603                      &BTRFS_I(inode)->runtime_flags))
8604                 filemap_fdatawrite_range(inode->i_mapping, offset,
8605                                          offset + count - 1);
8606
8607         if (iov_iter_rw(iter) == WRITE) {
8608                 /*
8609                  * If the write DIO goes beyond the EOF, we need to update
8610                  * i_size afterwards, and i_size is protected by i_mutex, so
8611                  * we cannot unlock the i_mutex in this case.
8612                  */
8613                 if (offset + count <= inode->i_size) {
8614                         dio_data.overwrite = 1;
8615                         inode_unlock(inode);
8616                         relock = true;
8617                 }
8618                 ret = btrfs_delalloc_reserve_space(inode, offset, count);
8619                 if (ret)
8620                         goto out;
8621                 dio_data.outstanding_extents = count_max_extents(count);
8622
8623                 /*
8624                  * We need to know how many extents we reserved so that we can
8625                  * do the accounting properly if we go over the number we
8626                  * originally calculated.  Abuse current->journal_info for this.
8627                  */
8628                 dio_data.reserve = round_up(count,
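                /*
                 * E.g. a 300MiB dio write: count_max_extents() above yields
                 * DIV_ROUND_UP(300MiB, 128MiB) == 3 outstanding extents (one
                 * per started BTRFS_MAX_EXTENT_SIZE chunk), and the
                 * reservation below rounds the byte count up to a sectorsize
                 * multiple.
                 */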
8629                                             fs_info->sectorsize);
8630                 dio_data.unsubmitted_oe_range_start = (u64)offset;
8631                 dio_data.unsubmitted_oe_range_end = (u64)offset;
8632                 current->journal_info = &dio_data;
8633                 down_read(&BTRFS_I(inode)->dio_sem);
8634         } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8635                                      &BTRFS_I(inode)->runtime_flags)) {
8636                 inode_dio_end(inode);
8637                 flags = DIO_LOCKING | DIO_SKIP_HOLES;
8638                 wakeup = false;
8639         }
8640
8641         ret = __blockdev_direct_IO(iocb, inode,
8642                                    fs_info->fs_devices->latest_bdev,
8643                                    iter, btrfs_get_blocks_direct, NULL,
8644                                    btrfs_submit_direct, flags);
8645         if (iov_iter_rw(iter) == WRITE) {
8646                 up_read(&BTRFS_I(inode)->dio_sem);
8647                 current->journal_info = NULL;
8648                 if (ret < 0 && ret != -EIOCBQUEUED) {
8649                         if (dio_data.reserve)
8650                                 btrfs_delalloc_release_space(inode, offset,
8651                                                              dio_data.reserve);
8652                         /*
8653                          * On error we might have left some ordered extents
8654                          * without submitting corresponding bios for them, so
8655                          * clean them up to avoid other tasks getting them
8656                          * and waiting for them to complete forever.
8657                          */
8658                         if (dio_data.unsubmitted_oe_range_start <
8659                             dio_data.unsubmitted_oe_range_end)
8660                                 btrfs_endio_direct_write_update_ordered(inode,
8661                                         dio_data.unsubmitted_oe_range_start,
8662                                         dio_data.unsubmitted_oe_range_end -
8663                                         dio_data.unsubmitted_oe_range_start,
8664                                         0);
8665                 } else if (ret >= 0 && (size_t)ret < count)
8666                         btrfs_delalloc_release_space(inode, offset,
8667                                                      count - (size_t)ret);
8668         }
8669 out:
8670         if (wakeup)
8671                 inode_dio_end(inode);
8672         if (relock)
8673                 inode_lock(inode);
8674
8675         return ret;
8676 }
8677
8678 #define BTRFS_FIEMAP_FLAGS      (FIEMAP_FLAG_SYNC)
8679
8680 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8681                 __u64 start, __u64 len)
8682 {
8683         int     ret;
8684
8685         ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
8686         if (ret)
8687                 return ret;
8688
8689         return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
8690 }
8691
8692 int btrfs_readpage(struct file *file, struct page *page)
8693 {
8694         struct extent_io_tree *tree;
8695         tree = &BTRFS_I(page->mapping->host)->io_tree;
8696         return extent_read_full_page(tree, page, btrfs_get_extent, 0);
8697 }
8698
8699 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8700 {
8701         struct extent_io_tree *tree;
8702         struct inode *inode = page->mapping->host;
8703         int ret;
8704
8705         if (current->flags & PF_MEMALLOC) {
8706                 redirty_page_for_writepage(wbc, page);
8707                 unlock_page(page);
8708                 return 0;
8709         }
8710
8711         /*
8712          * If we are under memory pressure, the VM will call this directly, so
8713          * we need to make sure we have the inode referenced for the ordered
8714          * extent.  If we can't get a reference, just return as if we did nothing.
8715          */
8716         if (!igrab(inode)) {
8717                 redirty_page_for_writepage(wbc, page);
8718                 return AOP_WRITEPAGE_ACTIVATE;
8719         }
8720         tree = &BTRFS_I(page->mapping->host)->io_tree;
8721         ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
8722         btrfs_add_delayed_iput(inode);
8723         return ret;
8724 }
8725
8726 static int btrfs_writepages(struct address_space *mapping,
8727                             struct writeback_control *wbc)
8728 {
8729         struct extent_io_tree *tree;
8730
8731         tree = &BTRFS_I(mapping->host)->io_tree;
8732         return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
8733 }
8734
8735 static int
8736 btrfs_readpages(struct file *file, struct address_space *mapping,
8737                 struct list_head *pages, unsigned nr_pages)
8738 {
8739         struct extent_io_tree *tree;
8740         tree = &BTRFS_I(mapping->host)->io_tree;
8741         return extent_readpages(tree, mapping, pages, nr_pages,
8742                                 btrfs_get_extent);
8743 }
8744 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8745 {
8746         struct extent_io_tree *tree;
8747         struct extent_map_tree *map;
8748         int ret;
8749
8750         tree = &BTRFS_I(page->mapping->host)->io_tree;
8751         map = &BTRFS_I(page->mapping->host)->extent_tree;
8752         ret = try_release_extent_mapping(map, tree, page, gfp_flags);
8753         if (ret == 1) {
8754                 ClearPagePrivate(page);
8755                 set_page_private(page, 0);
8756                 put_page(page);
8757         }
8758         return ret;
8759 }
8760
8761 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8762 {
8763         if (PageWriteback(page) || PageDirty(page))
8764                 return 0;
8765         return __btrfs_releasepage(page, gfp_flags);
8766 }
8767
8768 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8769                                  unsigned int length)
8770 {
8771         struct inode *inode = page->mapping->host;
8772         struct extent_io_tree *tree;
8773         struct btrfs_ordered_extent *ordered;
8774         struct extent_state *cached_state = NULL;
8775         u64 page_start = page_offset(page);
8776         u64 page_end = page_start + PAGE_SIZE - 1;
8777         u64 start;
8778         u64 end;
8779         int inode_evicting = inode->i_state & I_FREEING;
8780
8781         /*
8782          * we have the page locked, so new writeback can't start,
8783          * and the dirty bit won't be cleared while we are here.
8784          *
8785          * Wait for IO on this page so that we can safely clear
8786          * the PagePrivate2 bit and do ordered accounting
8787          */
8788         wait_on_page_writeback(page);
8789
8790         tree = &BTRFS_I(inode)->io_tree;
8791         if (offset) {
8792                 btrfs_releasepage(page, GFP_NOFS);
8793                 return;
8794         }
8795
8796         if (!inode_evicting)
8797                 lock_extent_bits(tree, page_start, page_end, &cached_state);
8798 again:
8799         start = page_start;
8800         ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
8801                                         page_end - start + 1);
8802         if (ordered) {
8803                 end = min(page_end, ordered->file_offset + ordered->len - 1);
8804                 /*
8805                  * IO on this page will never be started, so we need
8806                  * to account for any ordered extents now
8807                  */
8808                 if (!inode_evicting)
8809                         clear_extent_bit(tree, start, end,
8810                                          EXTENT_DIRTY | EXTENT_DELALLOC |
8811                                          EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8812                                          EXTENT_DEFRAG, 1, 0, &cached_state,
8813                                          GFP_NOFS);
8814                 /*
8815                  * whoever cleared the private bit is responsible
8816                  * for the finish_ordered_io
8817                  */
8818                 if (TestClearPagePrivate2(page)) {
8819                         struct btrfs_ordered_inode_tree *tree;
8820                         u64 new_len;
8821
8822                         tree = &BTRFS_I(inode)->ordered_tree;
8823
8824                         spin_lock_irq(&tree->lock);
8825                         set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8826                         new_len = start - ordered->file_offset;
8827                         if (new_len < ordered->truncated_len)
8828                                 ordered->truncated_len = new_len;
8829                         spin_unlock_irq(&tree->lock);
8830
8831                         if (btrfs_dec_test_ordered_pending(inode, &ordered,
8832                                                            start,
8833                                                            end - start + 1, 1))
8834                                 btrfs_finish_ordered_io(ordered);
8835                 }
8836                 btrfs_put_ordered_extent(ordered);
8837                 if (!inode_evicting) {
8838                         cached_state = NULL;
8839                         lock_extent_bits(tree, start, end,
8840                                          &cached_state);
8841                 }
8842
8843                 start = end + 1;
8844                 if (start < page_end)
8845                         goto again;
8846         }
8847
8848         /*
8849          * Qgroup reserved space handler
8850          * Page here will be either
8851          * 1) Already written to disk
8852          *    In this case, its reserved space is released from data rsv map
8853          *    and will be freed by delayed_ref handler finally.
8854          *    So even if we call qgroup_free_data(), it won't decrease the
8855          *    reserved space.
8856          * 2) Not written to disk
8857          *    This means the reserved space should be freed here. However,
8858          *    if a truncate invalidates the page (by clearing PageDirty)
8859          *    and the page was accounted for while allocating the extent
8860          *    in btrfs_check_data_free_space(), we let the delayed_ref
8861          *    handler free the entire extent.
8862          */
8863         if (PageDirty(page))
8864                 btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
8865         if (!inode_evicting) {
8866                 clear_extent_bit(tree, page_start, page_end,
8867                                  EXTENT_LOCKED | EXTENT_DIRTY |
8868                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8869                                  EXTENT_DEFRAG, 1, 1,
8870                                  &cached_state, GFP_NOFS);
8871
8872                 __btrfs_releasepage(page, GFP_NOFS);
8873         }
8874
8875         ClearPageChecked(page);
8876         if (PagePrivate(page)) {
8877                 ClearPagePrivate(page);
8878                 set_page_private(page, 0);
8879                 put_page(page);
8880         }
8881 }
8882
8883 /*
8884  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8885  * called from a page fault handler when a page is first dirtied. Hence we must
8886  * be careful to check for EOF conditions here. We set the page up correctly
8887  * for a written page which means we get ENOSPC checking when writing into
8888  * holes and correct delalloc and unwritten extent mapping on filesystems that
8889  * support these features.
8890  *
8891  * We are not allowed to take the i_mutex here so we have to play games to
8892  * protect against truncate races as the page could now be beyond EOF.  Because
8893  * vmtruncate() writes the inode size before removing pages, once we have the
8894  * page lock we can determine safely if the page is beyond EOF. If it is not
8895  * beyond EOF, then the page is guaranteed safe against truncation until we
8896  * unlock the page.
8897  */
8898 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
8899 {
8900         struct page *page = vmf->page;
8901         struct inode *inode = file_inode(vma->vm_file);
8902         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8903         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8904         struct btrfs_ordered_extent *ordered;
8905         struct extent_state *cached_state = NULL;
8906         char *kaddr;
8907         unsigned long zero_start;
8908         loff_t size;
8909         int ret;
8910         int reserved = 0;
8911         u64 reserved_space;
8912         u64 page_start;
8913         u64 page_end;
8914         u64 end;
8915
8916         reserved_space = PAGE_SIZE;
8917
8918         sb_start_pagefault(inode->i_sb);
8919         page_start = page_offset(page);
8920         page_end = page_start + PAGE_SIZE - 1;
8921         end = page_end;
8922
8923         /*
8924          * Reserving delalloc space after obtaining the page lock can lead to
8925          * deadlock. For example, if a dirty page is locked by this function
8926          * and the call to btrfs_delalloc_reserve_space() ends up triggering
8927          * dirty page write out, then the btrfs_writepage() function could
8928          * end up waiting indefinitely to get a lock on the page currently
8929          * being processed by btrfs_page_mkwrite() function.
8930          */
8931         ret = btrfs_delalloc_reserve_space(inode, page_start,
8932                                            reserved_space);
8933         if (!ret) {
8934                 ret = file_update_time(vma->vm_file);
8935                 reserved = 1;
8936         }
8937         if (ret) {
8938                 if (ret == -ENOMEM)
8939                         ret = VM_FAULT_OOM;
8940                 else /* -ENOSPC, -EIO, etc */
8941                         ret = VM_FAULT_SIGBUS;
8942                 if (reserved)
8943                         goto out;
8944                 goto out_noreserve;
8945         }
8946
8947         ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8948 again:
8949         lock_page(page);
8950         size = i_size_read(inode);
8951
8952         if ((page->mapping != inode->i_mapping) ||
8953             (page_start >= size)) {
8954                 /* page got truncated out from underneath us */
8955                 goto out_unlock;
8956         }
8957         wait_on_page_writeback(page);
8958
8959         lock_extent_bits(io_tree, page_start, page_end, &cached_state);
8960         set_page_extent_mapped(page);
8961
8962         /*
8963          * we can't set the delalloc bits if there are pending ordered
8964          * extents.  Drop our locks and wait for them to finish
8965          */
8966         ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
8967                         PAGE_SIZE);
8968         if (ordered) {
8969                 unlock_extent_cached(io_tree, page_start, page_end,
8970                                      &cached_state, GFP_NOFS);
8971                 unlock_page(page);
8972                 btrfs_start_ordered_extent(inode, ordered, 1);
8973                 btrfs_put_ordered_extent(ordered);
8974                 goto again;
8975         }
8976
8977         if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8978                 reserved_space = round_up(size - page_start,
8979                                           fs_info->sectorsize);
8980                 if (reserved_space < PAGE_SIZE) {
8981                         end = page_start + reserved_space - 1;
8982                         spin_lock(&BTRFS_I(inode)->lock);
8983                         BTRFS_I(inode)->outstanding_extents++;
8984                         spin_unlock(&BTRFS_I(inode)->lock);
8985                         btrfs_delalloc_release_space(inode, page_start,
8986                                                 PAGE_SIZE - reserved_space);
8987                 }
8988         }
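        /*
         * E.g. with 64KiB pages and a 4KiB sectorsize, if EOF falls 6000
         * bytes into this page, the block above trims the reservation to
         * round_up(6000, 4096) == 8KiB and, after bumping
         * outstanding_extents to keep the delalloc accounting balanced,
         * releases the remaining 56KiB of the page's reservation.
         */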
8989
8990         /*
8991          * page_mkwrite gets called when the page is first dirtied after it's
8992          * faulted in, but write(2) could also dirty a page and set delalloc
8993          * bits, thus in this case for space account reason, we still need to
8994          * clear any delalloc bits within this page range since we have to
8995          * reserve data&meta space before lock_page() (see above comments).
8996          */
8997         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
8998                           EXTENT_DIRTY | EXTENT_DELALLOC |
8999                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
9000                           0, 0, &cached_state, GFP_NOFS);
9001
9002         ret = btrfs_set_extent_delalloc(inode, page_start, end,
9003                                         &cached_state, 0);
9004         if (ret) {
9005                 unlock_extent_cached(io_tree, page_start, page_end,
9006                                      &cached_state, GFP_NOFS);
9007                 ret = VM_FAULT_SIGBUS;
9008                 goto out_unlock;
9009         }
9010         ret = 0;
9011
9012         /* page is wholly or partially inside EOF */
9013         if (page_start + PAGE_SIZE > size)
9014                 zero_start = size & ~PAGE_MASK;
9015         else
9016                 zero_start = PAGE_SIZE;
9017
9018         if (zero_start != PAGE_SIZE) {
9019                 kaddr = kmap(page);
9020                 memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
9021                 flush_dcache_page(page);
9022                 kunmap(page);
9023         }
9024         ClearPageChecked(page);
9025         set_page_dirty(page);
9026         SetPageUptodate(page);
9027
9028         BTRFS_I(inode)->last_trans = fs_info->generation;
9029         BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
9030         BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
9031
9032         unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
9033
9034 out_unlock:
9035         if (!ret) {
9036                 sb_end_pagefault(inode->i_sb);
9037                 return VM_FAULT_LOCKED;
9038         }
9039         unlock_page(page);
9040 out:
9041         btrfs_delalloc_release_space(inode, page_start, reserved_space);
9042 out_noreserve:
9043         sb_end_pagefault(inode->i_sb);
9044         return ret;
9045 }
9046
9047 static int btrfs_truncate(struct inode *inode)
9048 {
9049         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9050         struct btrfs_root *root = BTRFS_I(inode)->root;
9051         struct btrfs_block_rsv *rsv;
9052         int ret = 0;
9053         int err = 0;
9054         struct btrfs_trans_handle *trans;
9055         u64 mask = fs_info->sectorsize - 1;
9056         u64 min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
9057
9058         ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
9059                                        (u64)-1);
9060         if (ret)
9061                 return ret;
9062
9063         /*
9064          * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
9065          * 3 things going on here
9066          *
9067          * 1) We need to reserve space for our orphan item and the space to
9068          * delete our orphan item.  Lord knows we don't want to have a dangling
9069          * orphan item because we didn't reserve space to remove it.
9070          *
9071          * 2) We need to reserve space to update our inode.
9072          *
9073          * 3) We need to have something to cache all the space that is going to
9074          * be freed up by the truncate operation, but also have some slack
9075          * space reserved in case it uses space during the truncate (thank you
9076          * very much snapshotting).
9077          *
9078          * And we need these to all be separate.  The fact is we can use a lot of
9079          * space doing the truncate, and we have no earthly idea how much space
9080          * we will use, so we need the truncate reservation to be separate so it
9081          * doesn't end up using space reserved for updating the inode or
9082          * removing the orphan item.  We also need to be able to stop the
9083          * transaction and start a new one, which means we need to be able to
9084          * update the inode several times, and we have no way of knowing how
9085          * many times that will be, so we can't just reserve 1 item for the
9086          * entirety of the operation, so that has to be done separately as well.
9087          * Then there is the orphan item, which does indeed need to be held on
9088          * to for the whole operation, and we need nobody to touch this reserved
9089          * space except the orphan code.
9090          *
9091          * So that leaves us with
9092          *
9093          * 1) root->orphan_block_rsv - for the orphan deletion.
9094          * 2) rsv - for the truncate reservation, which we will steal from the
9095          * transaction reservation.
9096          * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
9097          * updating the inode.
9098          */
9099         rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
9100         if (!rsv)
9101                 return -ENOMEM;
9102         rsv->size = min_size;
9103         rsv->failfast = 1;
9104
9105         /*
9106          * 1 for the truncate slack space
9107          * 1 for updating the inode.
9108          */
9109         trans = btrfs_start_transaction(root, 2);
9110         if (IS_ERR(trans)) {
9111                 err = PTR_ERR(trans);
9112                 goto out;
9113         }
9114
9115         /* Migrate the slack space for the truncate to our reserve */
9116         ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
9117                                       min_size, 0);
9118         BUG_ON(ret);
9119
9120         /*
9121          * So if we truncate and then write and fsync we normally would just
9122          * write the extents that changed, which is a problem if we need to
9123          * first truncate that entire inode.  So set this flag so we write out
9124          * all of the extents in the inode to the sync log so we're completely
9125          * safe.
9126          */
9127         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
9128         trans->block_rsv = rsv;
9129
9130         while (1) {
9131                 ret = btrfs_truncate_inode_items(trans, root, inode,
9132                                                  inode->i_size,
9133                                                  BTRFS_EXTENT_DATA_KEY);
9134                 if (ret != -ENOSPC && ret != -EAGAIN) {
9135                         err = ret;
9136                         break;
9137                 }
9138
9139                 trans->block_rsv = &fs_info->trans_block_rsv;
9140                 ret = btrfs_update_inode(trans, root, inode);
9141                 if (ret) {
9142                         err = ret;
9143                         break;
9144                 }
9145
9146                 btrfs_end_transaction(trans);
9147                 btrfs_btree_balance_dirty(fs_info);
9148
9149                 trans = btrfs_start_transaction(root, 2);
9150                 if (IS_ERR(trans)) {
9151                         ret = err = PTR_ERR(trans);
9152                         trans = NULL;
9153                         break;
9154                 }
9155
9156                 btrfs_block_rsv_release(fs_info, rsv, -1);
9157                 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
9158                                               rsv, min_size, 0);
9159                 BUG_ON(ret);    /* shouldn't happen */
9160                 trans->block_rsv = rsv;
9161         }
9162
9163         if (ret == 0 && inode->i_nlink > 0) {
9164                 trans->block_rsv = root->orphan_block_rsv;
9165                 ret = btrfs_orphan_del(trans, BTRFS_I(inode));
9166                 if (ret)
9167                         err = ret;
9168         }
9169
9170         if (trans) {
9171                 trans->block_rsv = &fs_info->trans_block_rsv;
9172                 ret = btrfs_update_inode(trans, root, inode);
9173                 if (ret && !err)
9174                         err = ret;
9175
9176                 ret = btrfs_end_transaction(trans);
9177                 btrfs_btree_balance_dirty(fs_info);
9178         }
9179 out:
9180         btrfs_free_block_rsv(fs_info, rsv);
9181
9182         if (ret && !err)
9183                 err = ret;
9184
9185         return err;
9186 }
9187
9188 /*
9189  * create a new subvolume directory/inode (helper for the ioctl).
9190  */
9191 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
9192                              struct btrfs_root *new_root,
9193                              struct btrfs_root *parent_root,
9194                              u64 new_dirid)
9195 {
9196         struct inode *inode;
9197         int err;
9198         u64 index = 0;
9199
9200         inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
9201                                 new_dirid, new_dirid,
9202                                 S_IFDIR | (~current_umask() & S_IRWXUGO),
9203                                 &index);
9204         if (IS_ERR(inode))
9205                 return PTR_ERR(inode);
9206         inode->i_op = &btrfs_dir_inode_operations;
9207         inode->i_fop = &btrfs_dir_file_operations;
9208
9209         set_nlink(inode, 1);
9210         btrfs_i_size_write(BTRFS_I(inode), 0);
9211         unlock_new_inode(inode);
9212
9213         err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
9214         if (err)
9215                 btrfs_err(new_root->fs_info,
9216                           "error inheriting subvolume %llu properties: %d",
9217                           new_root->root_key.objectid, err);
9218
9219         err = btrfs_update_inode(trans, new_root, inode);
9220
9221         iput(inode);
9222         return err;
9223 }
9224
9225 struct inode *btrfs_alloc_inode(struct super_block *sb)
9226 {
9227         struct btrfs_inode *ei;
9228         struct inode *inode;
9229
9230         ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
9231         if (!ei)
9232                 return NULL;
9233
9234         ei->root = NULL;
9235         ei->generation = 0;
9236         ei->last_trans = 0;
9237         ei->last_sub_trans = 0;
9238         ei->logged_trans = 0;
9239         ei->delalloc_bytes = 0;
9240         ei->defrag_bytes = 0;
9241         ei->disk_i_size = 0;
9242         ei->flags = 0;
9243         ei->csum_bytes = 0;
9244         ei->index_cnt = (u64)-1;
9245         ei->dir_index = 0;
9246         ei->last_unlink_trans = 0;
9247         ei->last_log_commit = 0;
9248         ei->delayed_iput_count = 0;
9249
9250         spin_lock_init(&ei->lock);
9251         ei->outstanding_extents = 0;
9252         ei->reserved_extents = 0;
9253
9254         ei->runtime_flags = 0;
9255         ei->force_compress = BTRFS_COMPRESS_NONE;
9256
9257         ei->delayed_node = NULL;
9258
9259         ei->i_otime.tv_sec = 0;
9260         ei->i_otime.tv_nsec = 0;
9261
9262         inode = &ei->vfs_inode;
9263         extent_map_tree_init(&ei->extent_tree);
9264         extent_io_tree_init(&ei->io_tree, &inode->i_data);
9265         extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
9266         ei->io_tree.track_uptodate = 1;
9267         ei->io_failure_tree.track_uptodate = 1;
9268         atomic_set(&ei->sync_writers, 0);
9269         mutex_init(&ei->log_mutex);
9270         mutex_init(&ei->delalloc_mutex);
9271         btrfs_ordered_inode_tree_init(&ei->ordered_tree);
9272         INIT_LIST_HEAD(&ei->delalloc_inodes);
9273         INIT_LIST_HEAD(&ei->delayed_iput);
9274         RB_CLEAR_NODE(&ei->rb_node);
9275         init_rwsem(&ei->dio_sem);
9276
9277         return inode;
9278 }
9279
9280 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
9281 void btrfs_test_destroy_inode(struct inode *inode)
9282 {
9283         btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
9284         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9285 }
9286 #endif
9287
9288 static void btrfs_i_callback(struct rcu_head *head)
9289 {
9290         struct inode *inode = container_of(head, struct inode, i_rcu);
9291         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9292 }
9293
9294 void btrfs_destroy_inode(struct inode *inode)
9295 {
9296         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9297         struct btrfs_ordered_extent *ordered;
9298         struct btrfs_root *root = BTRFS_I(inode)->root;
9299
9300         WARN_ON(!hlist_empty(&inode->i_dentry));
9301         WARN_ON(inode->i_data.nrpages);
9302         WARN_ON(BTRFS_I(inode)->outstanding_extents);
9303         WARN_ON(BTRFS_I(inode)->reserved_extents);
9304         WARN_ON(BTRFS_I(inode)->delalloc_bytes);
9305         WARN_ON(BTRFS_I(inode)->csum_bytes);
9306         WARN_ON(BTRFS_I(inode)->defrag_bytes);
9307
9308         /*
9309          * This can happen when we create an inode, but somebody else also
9310          * created the same inode and we need to destroy the one we already
9311          * created.
9312          */
9313         if (!root)
9314                 goto free;
9315
9316         if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
9317                      &BTRFS_I(inode)->runtime_flags)) {
9318                 btrfs_info(fs_info, "inode %llu still on the orphan list",
9319                            btrfs_ino(BTRFS_I(inode)));
9320                 atomic_dec(&root->orphan_inodes);
9321         }
9322
9323         while (1) {
9324                 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
9325                 if (!ordered)
9326                         break;
9327                 else {
9328                         btrfs_err(fs_info,
9329                                   "found ordered extent %llu %llu on inode cleanup",
9330                                   ordered->file_offset, ordered->len);
9331                         btrfs_remove_ordered_extent(inode, ordered);
9332                         btrfs_put_ordered_extent(ordered); /* once for us */
9333                         btrfs_put_ordered_extent(ordered); /* once for the tree */
9334                 }
9335         }
9336         btrfs_qgroup_check_reserved_leak(inode);
9337         inode_tree_del(inode);
9338         btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
9339 free:
9340         call_rcu(&inode->i_rcu, btrfs_i_callback);
9341 }
9342
9343 int btrfs_drop_inode(struct inode *inode)
9344 {
9345         struct btrfs_root *root = BTRFS_I(inode)->root;
9346
9347         if (root == NULL)
9348                 return 1;
9349
9350         /* the snapshot/subvolume tree is being deleted */
9351         if (btrfs_root_refs(&root->root_item) == 0)
9352                 return 1;
9353         else
9354                 return generic_drop_inode(inode);
9355 }
9356
9357 static void init_once(void *foo)
9358 {
9359         struct btrfs_inode *ei = (struct btrfs_inode *) foo;
9360
9361         inode_init_once(&ei->vfs_inode);
9362 }
9363
9364 void btrfs_destroy_cachep(void)
9365 {
9366         /*
9367          * Make sure all delayed rcu free inodes are flushed before we
9368          * destroy the caches.
9369          */
9370         rcu_barrier();
9371         kmem_cache_destroy(btrfs_inode_cachep);
9372         kmem_cache_destroy(btrfs_trans_handle_cachep);
9373         kmem_cache_destroy(btrfs_transaction_cachep);
9374         kmem_cache_destroy(btrfs_path_cachep);
9375         kmem_cache_destroy(btrfs_free_space_cachep);
9376 }
9377
9378 int btrfs_init_cachep(void)
9379 {
9380         btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
9381                         sizeof(struct btrfs_inode), 0,
9382                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
9383                         init_once);
9384         if (!btrfs_inode_cachep)
9385                 goto fail;
9386
9387         btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
9388                         sizeof(struct btrfs_trans_handle), 0,
9389                         SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
9390         if (!btrfs_trans_handle_cachep)
9391                 goto fail;
9392
9393         btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
9394                         sizeof(struct btrfs_transaction), 0,
9395                         SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
9396         if (!btrfs_transaction_cachep)
9397                 goto fail;
9398
9399         btrfs_path_cachep = kmem_cache_create("btrfs_path",
9400                         sizeof(struct btrfs_path), 0,
9401                         SLAB_MEM_SPREAD, NULL);
9402         if (!btrfs_path_cachep)
9403                 goto fail;
9404
9405         btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
9406                         sizeof(struct btrfs_free_space), 0,
9407                         SLAB_MEM_SPREAD, NULL);
9408         if (!btrfs_free_space_cachep)
9409                 goto fail;
9410
9411         return 0;
9412 fail:
9413         btrfs_destroy_cachep();
9414         return -ENOMEM;
9415 }
9416
9417 static int btrfs_getattr(struct vfsmount *mnt,
9418                          struct dentry *dentry, struct kstat *stat)
9419 {
9420         u64 delalloc_bytes;
9421         struct inode *inode = d_inode(dentry);
9422         u32 blocksize = inode->i_sb->s_blocksize;
9423
9424         generic_fillattr(inode, stat);
9425         stat->dev = BTRFS_I(inode)->root->anon_dev;
9426
9427         spin_lock(&BTRFS_I(inode)->lock);
9428         delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
9429         spin_unlock(&BTRFS_I(inode)->lock);
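        /*
         * Worked example: with a 4K block size, 10K of on-disk bytes and
         * 3K of delalloc round up to 12K + 4K = 16K, so stat->blocks is
         * reported as 16384 >> 9 = 32 512-byte sectors.
         */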
9430         stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
9431                         ALIGN(delalloc_bytes, blocksize)) >> 9;
9432         return 0;
9433 }
9434
9435 static int btrfs_rename_exchange(struct inode *old_dir,
9436                               struct dentry *old_dentry,
9437                               struct inode *new_dir,
9438                               struct dentry *new_dentry)
9439 {
9440         struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9441         struct btrfs_trans_handle *trans;
9442         struct btrfs_root *root = BTRFS_I(old_dir)->root;
9443         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9444         struct inode *new_inode = new_dentry->d_inode;
9445         struct inode *old_inode = old_dentry->d_inode;
9446         struct timespec ctime = current_time(old_inode);
9447         struct dentry *parent;
9448         u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9449         u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
9450         u64 old_idx = 0;
9451         u64 new_idx = 0;
9452         u64 root_objectid;
9453         int ret;
9454         bool root_log_pinned = false;
9455         bool dest_log_pinned = false;
9456
9457         /* we only allow rename subvolume link between subvolumes */
9458         if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9459                 return -EXDEV;
9460
9461         /* close the race window with snapshot create/destroy ioctl */
9462         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9463                 down_read(&fs_info->subvol_sem);
9464         if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
9465                 down_read(&fs_info->subvol_sem);
9466
9467         /*
9468          * We want to reserve the absolute worst case amount of items.  So if
9469          * both inodes are subvols and we need to unlink them then that would
9470          * require 4 item modifications, but if they are both normal inodes it
9471          * would require 5 item modifications, so we'll assume they are normal
9472          * inodes.  So 5 * 2 is 10, plus 2 for the new links, so 12 total items
9473          * should cover the worst case number of items we'll modify.
9474          */
9475         trans = btrfs_start_transaction(root, 12);
9476         if (IS_ERR(trans)) {
9477                 ret = PTR_ERR(trans);
9478                 goto out_notrans;
9479         }
9480
9481         /*
9482          * We need to find a free sequence number both in the source and
9483          * in the destination directory for the exchange.
9484          */
9485         ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
9486         if (ret)
9487                 goto out_fail;
9488         ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
9489         if (ret)
9490                 goto out_fail;
9491
9492         BTRFS_I(old_inode)->dir_index = 0ULL;
9493         BTRFS_I(new_inode)->dir_index = 0ULL;
9494
9495         /* Reference for the source. */
9496         if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9497                 /* force full log commit if subvolume involved. */
9498                 btrfs_set_log_full_commit(fs_info, trans);
9499         } else {
9500                 btrfs_pin_log_trans(root);
9501                 root_log_pinned = true;
9502                 ret = btrfs_insert_inode_ref(trans, dest,
9503                                              new_dentry->d_name.name,
9504                                              new_dentry->d_name.len,
9505                                              old_ino,
9506                                              btrfs_ino(BTRFS_I(new_dir)),
9507                                              old_idx);
9508                 if (ret)
9509                         goto out_fail;
9510         }
9511
9512         /* And now for the dest. */
9513         if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9514                 /* force full log commit if subvolume involved. */
9515                 btrfs_set_log_full_commit(fs_info, trans);
9516         } else {
9517                 btrfs_pin_log_trans(dest);
9518                 dest_log_pinned = true;
9519                 ret = btrfs_insert_inode_ref(trans, root,
9520                                              old_dentry->d_name.name,
9521                                              old_dentry->d_name.len,
9522                                              new_ino,
9523                                              btrfs_ino(BTRFS_I(old_dir)),
9524                                              new_idx);
9525                 if (ret)
9526                         goto out_fail;
9527         }
9528
9529         /* Update inode version and ctime/mtime. */
9530         inode_inc_iversion(old_dir);
9531         inode_inc_iversion(new_dir);
9532         inode_inc_iversion(old_inode);
9533         inode_inc_iversion(new_inode);
9534         old_dir->i_ctime = old_dir->i_mtime = ctime;
9535         new_dir->i_ctime = new_dir->i_mtime = ctime;
9536         old_inode->i_ctime = ctime;
9537         new_inode->i_ctime = ctime;
9538
9539         if (old_dentry->d_parent != new_dentry->d_parent) {
9540                 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9541                                 BTRFS_I(old_inode), 1);
9542                 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
9543                                 BTRFS_I(new_inode), 1);
9544         }
9545
9546         /* src is a subvolume */
9547         if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9548                 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
9549                 ret = btrfs_unlink_subvol(trans, root, old_dir,
9550                                           root_objectid,
9551                                           old_dentry->d_name.name,
9552                                           old_dentry->d_name.len);
9553         } else { /* src is an inode */
9554                 ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
9555                                            BTRFS_I(old_dentry->d_inode),
9556                                            old_dentry->d_name.name,
9557                                            old_dentry->d_name.len);
9558                 if (!ret)
9559                         ret = btrfs_update_inode(trans, root, old_inode);
9560         }
9561         if (ret) {
9562                 btrfs_abort_transaction(trans, ret);
9563                 goto out_fail;
9564         }
9565
9566         /* dest is a subvolume */
9567         if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9568                 root_objectid = BTRFS_I(new_inode)->root->root_key.objectid;
9569                 ret = btrfs_unlink_subvol(trans, dest, new_dir,
9570                                           root_objectid,
9571                                           new_dentry->d_name.name,
9572                                           new_dentry->d_name.len);
9573         } else { /* dest is an inode */
9574                 ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
9575                                            BTRFS_I(new_dentry->d_inode),
9576                                            new_dentry->d_name.name,
9577                                            new_dentry->d_name.len);
9578                 if (!ret)
9579                         ret = btrfs_update_inode(trans, dest, new_inode);
9580         }
9581         if (ret) {
9582                 btrfs_abort_transaction(trans, ret);
9583                 goto out_fail;
9584         }
9585
9586         ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9587                              new_dentry->d_name.name,
9588                              new_dentry->d_name.len, 0, old_idx);
9589         if (ret) {
9590                 btrfs_abort_transaction(trans, ret);
9591                 goto out_fail;
9592         }
9593
9594         ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
9595                              old_dentry->d_name.name,
9596                              old_dentry->d_name.len, 0, new_idx);
9597         if (ret) {
9598                 btrfs_abort_transaction(trans, ret);
9599                 goto out_fail;
9600         }
9601
9602         if (old_inode->i_nlink == 1)
9603                 BTRFS_I(old_inode)->dir_index = old_idx;
9604         if (new_inode->i_nlink == 1)
9605                 BTRFS_I(new_inode)->dir_index = new_idx;
9606
9607         if (root_log_pinned) {
9608                 parent = new_dentry->d_parent;
9609                 btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
9610                                 parent);
9611                 btrfs_end_log_trans(root);
9612                 root_log_pinned = false;
9613         }
9614         if (dest_log_pinned) {
9615                 parent = old_dentry->d_parent;
9616                 btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir),
9617                                 parent);
9618                 btrfs_end_log_trans(dest);
9619                 dest_log_pinned = false;
9620         }
9621 out_fail:
9622         /*
9623          * If we have pinned a log and an error happened, we unpin tasks
9624          * trying to sync the log and force them to fall back to a transaction
9625          * commit if the log currently contains any of the inodes involved in
9626          * this rename operation (to ensure we do not persist a log with an
9627          * inconsistent state for any of these inodes or lead to any
9628          * inconsistencies when replayed). If the transaction was aborted, the
9629          * abort reason is propagated to userspace when attempting to commit
9630          * the transaction. If the log does not contain any of these inodes, we
9631          * allow the tasks to sync it.
9632          */
9633         if (ret && (root_log_pinned || dest_log_pinned)) {
9634                 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
9635                     btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
9636                     btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
9637                     (new_inode &&
9638                      btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
9639                         btrfs_set_log_full_commit(fs_info, trans);
9640
9641                 if (root_log_pinned) {
9642                         btrfs_end_log_trans(root);
9643                         root_log_pinned = false;
9644                 }
9645                 if (dest_log_pinned) {
9646                         btrfs_end_log_trans(dest);
9647                         dest_log_pinned = false;
9648                 }
9649         }
9650         ret = btrfs_end_transaction(trans);
9651 out_notrans:
9652         if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
9653                 up_read(&fs_info->subvol_sem);
9654         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9655                 up_read(&fs_info->subvol_sem);
9656
9657         return ret;
9658 }
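
A minimal userspace sketch of the exchange rename implemented above, for
context only; it is not part of inode.c, the paths "a" and "b" are
placeholders, and the raw syscall is used in case the C library has no
renameat2() wrapper.

#define _GNU_SOURCE
#include <fcntl.h>              /* AT_FDCWD */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef RENAME_EXCHANGE
#define RENAME_EXCHANGE (1 << 1)        /* from include/uapi/linux/fs.h */
#endif

int main(void)
{
        /* Atomically swap "a" and "b"; both paths must already exist. */
        if (syscall(SYS_renameat2, AT_FDCWD, "a", AT_FDCWD, "b",
                    RENAME_EXCHANGE) != 0) {
                perror("renameat2");
                return 1;
        }
        return 0;
}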
9659
9660 static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
9661                                      struct btrfs_root *root,
9662                                      struct inode *dir,
9663                                      struct dentry *dentry)
9664 {
9665         int ret;
9666         struct inode *inode;
9667         u64 objectid;
9668         u64 index;
9669
9670         ret = btrfs_find_free_ino(root, &objectid);
9671         if (ret)
9672                 return ret;
9673
9674         inode = btrfs_new_inode(trans, root, dir,
9675                                 dentry->d_name.name,
9676                                 dentry->d_name.len,
9677                                 btrfs_ino(BTRFS_I(dir)),
9678                                 objectid,
9679                                 S_IFCHR | WHITEOUT_MODE,
9680                                 &index);
9681
9682         if (IS_ERR(inode)) {
9683                 ret = PTR_ERR(inode);
9684                 return ret;
9685         }
9686
9687         inode->i_op = &btrfs_special_inode_operations;
9688         init_special_inode(inode, inode->i_mode,
9689                 WHITEOUT_DEV);
9690
9691         ret = btrfs_init_inode_security(trans, inode, dir,
9692                                 &dentry->d_name);
9693         if (ret)
9694                 goto out;
9695
9696         ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
9697                                 BTRFS_I(inode), 0, index);
9698         if (ret)
9699                 goto out;
9700
9701         ret = btrfs_update_inode(trans, root, inode);
9702 out:
9703         unlock_new_inode(inode);
9704         if (ret)
9705                 inode_dec_link_count(inode);
9706         iput(inode);
9707
9708         return ret;
9709 }
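
For reference, a whiteout is nothing exotic on disk: as the
S_IFCHR | WHITEOUT_MODE mode and WHITEOUT_DEV above show, it is a
character device node with device number 0:0. A short userspace sketch,
not part of inode.c, with make_whiteout() being illustrative only:

#include <sys/stat.h>
#include <sys/sysmacros.h>      /* makedev() */

/* Create the userspace equivalent of the whiteout built above. */
static int make_whiteout(const char *path)
{
        return mknod(path, S_IFCHR | 0000, makedev(0, 0));
}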
9710
9711 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
9712                            struct inode *new_dir, struct dentry *new_dentry,
9713                            unsigned int flags)
9714 {
9715         struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9716         struct btrfs_trans_handle *trans;
9717         unsigned int trans_num_items;
9718         struct btrfs_root *root = BTRFS_I(old_dir)->root;
9719         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9720         struct inode *new_inode = d_inode(new_dentry);
9721         struct inode *old_inode = d_inode(old_dentry);
9722         u64 index = 0;
9723         u64 root_objectid;
9724         int ret;
9725         u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9726         bool log_pinned = false;
9727
9728         if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9729                 return -EPERM;
9730
9731         /* we only allow rename subvolume link between subvolumes */
9732         if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9733                 return -EXDEV;
9734
9735         if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9736             (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
9737                 return -ENOTEMPTY;
9738
9739         if (S_ISDIR(old_inode->i_mode) && new_inode &&
9740             new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9741                 return -ENOTEMPTY;
9742
9743
9744         /* check for collisions, even if the name isn't there */
9745         ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9746                              new_dentry->d_name.name,
9747                              new_dentry->d_name.len);
9748
9749         if (ret) {
9750                 if (ret == -EEXIST) {
9751                         /*
9752                          * we shouldn't get -EEXIST without a new_inode
9753                          */
9754                         if (WARN_ON(!new_inode))
9755                                 return ret;
9756                 } else {
9757                         /* maybe -EOVERFLOW */
9758                         return ret;
9759                 }
9760         }
9761         ret = 0;
9762
9763         /*
9764          * we're using rename to replace one file with another.  Start IO on it
9765          * now so  we don't add too much work to the end of the transaction
9766          */
9767         if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9768                 filemap_flush(old_inode->i_mapping);
9769
9770         /* close the race window with snapshot create/destroy ioctl */
9771         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9772                 down_read(&fs_info->subvol_sem);
9773         /*
9774          * We want to reserve the absolute worst case amount of items.  So if
9775          * both inodes are subvols and we need to unlink them then that would
9776          * require 4 item modifications, but if they are both normal inodes it
9777          * would require 5 item modifications, so we'll assume they are normal
9778          * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
9779          * should cover the worst case number of items we'll modify.
9780          * If our rename has the whiteout flag, we need 5 more units for the
9781          * new inode (1 inode item, 1 inode ref, 2 dir items and 1 xattr item
9782          * when selinux is enabled).
9783          */
9784         trans_num_items = 11;
9785         if (flags & RENAME_WHITEOUT)
9786                 trans_num_items += 5;
9787         trans = btrfs_start_transaction(root, trans_num_items);
9788         if (IS_ERR(trans)) {
9789                 ret = PTR_ERR(trans);
9790                 goto out_notrans;
9791         }
9792
9793         if (dest != root)
9794                 btrfs_record_root_in_trans(trans, dest);
9795
9796         ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
9797         if (ret)
9798                 goto out_fail;
9799
9800         BTRFS_I(old_inode)->dir_index = 0ULL;
9801         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9802                 /* force full log commit if subvolume involved. */
9803                 btrfs_set_log_full_commit(fs_info, trans);
9804         } else {
9805                 btrfs_pin_log_trans(root);
9806                 log_pinned = true;
9807                 ret = btrfs_insert_inode_ref(trans, dest,
9808                                              new_dentry->d_name.name,
9809                                              new_dentry->d_name.len,
9810                                              old_ino,
9811                                              btrfs_ino(BTRFS_I(new_dir)), index);
9812                 if (ret)
9813                         goto out_fail;
9814         }
9815
9816         inode_inc_iversion(old_dir);
9817         inode_inc_iversion(new_dir);
9818         inode_inc_iversion(old_inode);
9819         old_dir->i_ctime = old_dir->i_mtime =
9820         new_dir->i_ctime = new_dir->i_mtime =
9821         old_inode->i_ctime = current_time(old_dir);
9822
9823         if (old_dentry->d_parent != new_dentry->d_parent)
9824                 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9825                                 BTRFS_I(old_inode), 1);
9826
9827         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9828                 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
9829                 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
9830                                         old_dentry->d_name.name,
9831                                         old_dentry->d_name.len);
9832         } else {
9833                 ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
9834                                         BTRFS_I(d_inode(old_dentry)),
9835                                         old_dentry->d_name.name,
9836                                         old_dentry->d_name.len);
9837                 if (!ret)
9838                         ret = btrfs_update_inode(trans, root, old_inode);
9839         }
9840         if (ret) {
9841                 btrfs_abort_transaction(trans, ret);
9842                 goto out_fail;
9843         }
9844
9845         if (new_inode) {
9846                 inode_inc_iversion(new_inode);
9847                 new_inode->i_ctime = current_time(new_inode);
9848                 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
9849                              BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9850                         root_objectid = BTRFS_I(new_inode)->location.objectid;
9851                         ret = btrfs_unlink_subvol(trans, dest, new_dir,
9852                                                 root_objectid,
9853                                                 new_dentry->d_name.name,
9854                                                 new_dentry->d_name.len);
9855                         BUG_ON(new_inode->i_nlink == 0);
9856                 } else {
9857                         ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
9858                                                  BTRFS_I(d_inode(new_dentry)),
9859                                                  new_dentry->d_name.name,
9860                                                  new_dentry->d_name.len);
9861                 }
9862                 if (!ret && new_inode->i_nlink == 0)
9863                         ret = btrfs_orphan_add(trans,
9864                                         BTRFS_I(d_inode(new_dentry)));
9865                 if (ret) {
9866                         btrfs_abort_transaction(trans, ret);
9867                         goto out_fail;
9868                 }
9869         }
9870
9871         ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9872                              new_dentry->d_name.name,
9873                              new_dentry->d_name.len, 0, index);
9874         if (ret) {
9875                 btrfs_abort_transaction(trans, ret);
9876                 goto out_fail;
9877         }
9878
9879         if (old_inode->i_nlink == 1)
9880                 BTRFS_I(old_inode)->dir_index = index;
9881
9882         if (log_pinned) {
9883                 struct dentry *parent = new_dentry->d_parent;
9884
9885                 btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
9886                                 parent);
9887                 btrfs_end_log_trans(root);
9888                 log_pinned = false;
9889         }
9890
9891         if (flags & RENAME_WHITEOUT) {
9892                 ret = btrfs_whiteout_for_rename(trans, root, old_dir,
9893                                                 old_dentry);
9894
9895                 if (ret) {
9896                         btrfs_abort_transaction(trans, ret);
9897                         goto out_fail;
9898                 }
9899         }
9900 out_fail:
9901         /*
9902          * If we have pinned the log and an error happened, we unpin tasks
9903          * trying to sync the log and force them to fall back to a transaction
9904          * commit if the log currently contains any of the inodes involved in
9905          * this rename operation (to ensure we do not persist a log with an
9906          * inconsistent state for any of these inodes or lead to any
9907          * inconsistencies when replayed). If the transaction was aborted, the
9908          * abort reason is propagated to userspace when attempting to commit
9909          * the transaction. If the log does not contain any of these inodes, we
9910          * allow the tasks to sync it.
9911          */
9912         if (ret && log_pinned) {
9913                 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
9914                     btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
9915                     btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
9916                     (new_inode &&
9917                      btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
9918                         btrfs_set_log_full_commit(fs_info, trans);
9919
9920                 btrfs_end_log_trans(root);
9921                 log_pinned = false;
9922         }
9923         btrfs_end_transaction(trans);
9924 out_notrans:
9925         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9926                 up_read(&fs_info->subvol_sem);
9927
9928         return ret;
9929 }
9930
9931 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
9932                          struct inode *new_dir, struct dentry *new_dentry,
9933                          unsigned int flags)
9934 {
9935         if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
9936                 return -EINVAL;
9937
9938         if (flags & RENAME_EXCHANGE)
9939                 return btrfs_rename_exchange(old_dir, old_dentry, new_dir,
9940                                           new_dentry);
9941
9942         return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
9943 }
9944
9945 static void btrfs_run_delalloc_work(struct btrfs_work *work)
9946 {
9947         struct btrfs_delalloc_work *delalloc_work;
9948         struct inode *inode;
9949
9950         delalloc_work = container_of(work, struct btrfs_delalloc_work,
9951                                      work);
9952         inode = delalloc_work->inode;
9953         filemap_flush(inode->i_mapping);
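        /*
         * Async (compressed) extents can leave pages dirty behind the
         * first flush, so flush once more if any were in flight.
         */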
9954         if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9955                                 &BTRFS_I(inode)->runtime_flags))
9956                 filemap_flush(inode->i_mapping);
9957
9958         if (delalloc_work->delay_iput)
9959                 btrfs_add_delayed_iput(inode);
9960         else
9961                 iput(inode);
9962         complete(&delalloc_work->completion);
9963 }
9964
9965 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
9966                                                     int delay_iput)
9967 {
9968         struct btrfs_delalloc_work *work;
9969
9970         work = kmalloc(sizeof(*work), GFP_NOFS);
9971         if (!work)
9972                 return NULL;
9973
9974         init_completion(&work->completion);
9975         INIT_LIST_HEAD(&work->list);
9976         work->inode = inode;
9977         work->delay_iput = delay_iput;
9978         WARN_ON_ONCE(!inode);
9979         btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
9980                         btrfs_run_delalloc_work, NULL, NULL);
9981
9982         return work;
9983 }
9984
9985 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
9986 {
9987         wait_for_completion(&work->completion);
9988         kfree(work);
9989 }
9990
9991 /*
9992  * some fairly slow code that needs optimization. This walks the list
9993  * of all the inodes with pending delalloc and forces them to disk.
9994  */
9995 static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
9996                                    int nr)
9997 {
9998         struct btrfs_inode *binode;
9999         struct inode *inode;
10000         struct btrfs_delalloc_work *work, *next;
10001         struct list_head works;
10002         struct list_head splice;
10003         int ret = 0;
10004
10005         INIT_LIST_HEAD(&works);
10006         INIT_LIST_HEAD(&splice);
10007
10008         mutex_lock(&root->delalloc_mutex);
10009         spin_lock(&root->delalloc_lock);
10010         list_splice_init(&root->delalloc_inodes, &splice);
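        /*
         * Work on a private copy of the list so the lock can be dropped
         * while queueing; anything still on @splice when we bail out is
         * spliced back below.
         */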
10011         while (!list_empty(&splice)) {
10012                 binode = list_entry(splice.next, struct btrfs_inode,
10013                                     delalloc_inodes);
10014
10015                 list_move_tail(&binode->delalloc_inodes,
10016                                &root->delalloc_inodes);
10017                 inode = igrab(&binode->vfs_inode);
10018                 if (!inode) {
10019                         cond_resched_lock(&root->delalloc_lock);
10020                         continue;
10021                 }
10022                 spin_unlock(&root->delalloc_lock);
10023
10024                 work = btrfs_alloc_delalloc_work(inode, delay_iput);
10025                 if (!work) {
10026                         if (delay_iput)
10027                                 btrfs_add_delayed_iput(inode);
10028                         else
10029                                 iput(inode);
10030                         ret = -ENOMEM;
10031                         goto out;
10032                 }
10033                 list_add_tail(&work->list, &works);
10034                 btrfs_queue_work(root->fs_info->flush_workers,
10035                                  &work->work);
10036                 ret++;
10037                 if (nr != -1 && ret >= nr)
10038                         goto out;
10039                 cond_resched();
10040                 spin_lock(&root->delalloc_lock);
10041         }
10042         spin_unlock(&root->delalloc_lock);
10043
10044 out:
10045         list_for_each_entry_safe(work, next, &works, list) {
10046                 list_del_init(&work->list);
10047                 btrfs_wait_and_free_delalloc_work(work);
10048         }
10049
10050         if (!list_empty_careful(&splice)) {
10051                 spin_lock(&root->delalloc_lock);
10052                 list_splice_tail(&splice, &root->delalloc_inodes);
10053                 spin_unlock(&root->delalloc_lock);
10054         }
10055         mutex_unlock(&root->delalloc_mutex);
10056         return ret;
10057 }
10058
10059 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
10060 {
10061         struct btrfs_fs_info *fs_info = root->fs_info;
10062         int ret;
10063
10064         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
10065                 return -EROFS;
10066
10067         ret = __start_delalloc_inodes(root, delay_iput, -1);
10068         if (ret > 0)
10069                 ret = 0;
10070         /*
10071          * the filemap_flush will queue IO into the worker threads, but
10072          * we have to make sure the IO is actually started and that
10073          * ordered extents get created before we return
10074          */
10075         atomic_inc(&fs_info->async_submit_draining);
10076         while (atomic_read(&fs_info->nr_async_submits) ||
10077                atomic_read(&fs_info->async_delalloc_pages)) {
10078                 wait_event(fs_info->async_submit_wait,
10079                            (atomic_read(&fs_info->nr_async_submits) == 0 &&
10080                             atomic_read(&fs_info->async_delalloc_pages) == 0));
10081         }
10082         atomic_dec(&fs_info->async_submit_draining);
10083         return ret;
10084 }
10085
10086 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
10087                                int nr)
10088 {
10089         struct btrfs_root *root;
10090         struct list_head splice;
10091         int ret;
10092
10093         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
10094                 return -EROFS;
10095
10096         INIT_LIST_HEAD(&splice);
10097
10098         mutex_lock(&fs_info->delalloc_root_mutex);
10099         spin_lock(&fs_info->delalloc_root_lock);
10100         list_splice_init(&fs_info->delalloc_roots, &splice);
10101         while (!list_empty(&splice) && nr) {
10102                 root = list_first_entry(&splice, struct btrfs_root,
10103                                         delalloc_root);
10104                 root = btrfs_grab_fs_root(root);
10105                 BUG_ON(!root);
10106                 list_move_tail(&root->delalloc_root,
10107                                &fs_info->delalloc_roots);
10108                 spin_unlock(&fs_info->delalloc_root_lock);
10109
10110                 ret = __start_delalloc_inodes(root, delay_iput, nr);
10111                 btrfs_put_fs_root(root);
10112                 if (ret < 0)
10113                         goto out;
10114
10115                 if (nr != -1) {
10116                         nr -= ret;
10117                         WARN_ON(nr < 0);
10118                 }
10119                 spin_lock(&fs_info->delalloc_root_lock);
10120         }
10121         spin_unlock(&fs_info->delalloc_root_lock);
10122
10123         ret = 0;
10124         atomic_inc(&fs_info->async_submit_draining);
10125         while (atomic_read(&fs_info->nr_async_submits) ||
10126               atomic_read(&fs_info->async_delalloc_pages)) {
10127                 wait_event(fs_info->async_submit_wait,
10128                    (atomic_read(&fs_info->nr_async_submits) == 0 &&
10129                     atomic_read(&fs_info->async_delalloc_pages) == 0));
10130         }
10131         atomic_dec(&fs_info->async_submit_draining);
10132 out:
10133         if (!list_empty_careful(&splice)) {
10134                 spin_lock(&fs_info->delalloc_root_lock);
10135                 list_splice_tail(&splice, &fs_info->delalloc_roots);
10136                 spin_unlock(&fs_info->delalloc_root_lock);
10137         }
10138         mutex_unlock(&fs_info->delalloc_root_mutex);
10139         return ret;
10140 }
10141
10142 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
10143                          const char *symname)
10144 {
10145         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
10146         struct btrfs_trans_handle *trans;
10147         struct btrfs_root *root = BTRFS_I(dir)->root;
10148         struct btrfs_path *path;
10149         struct btrfs_key key;
10150         struct inode *inode = NULL;
10151         int err;
10152         int drop_inode = 0;
10153         u64 objectid;
10154         u64 index = 0;
10155         int name_len;
10156         int datasize;
10157         unsigned long ptr;
10158         struct btrfs_file_extent_item *ei;
10159         struct extent_buffer *leaf;
10160
10161         name_len = strlen(symname);
10162         if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
10163                 return -ENAMETOOLONG;
10164
10165         /*
10166          * 2 items for inode item and ref
10167          * 2 items for dir items
10168          * 1 item for updating parent inode item
10169          * 1 item for the inline extent item
10170          * 1 item for xattr if selinux is on
10171          */
10172         trans = btrfs_start_transaction(root, 7);
10173         if (IS_ERR(trans))
10174                 return PTR_ERR(trans);
10175
10176         err = btrfs_find_free_ino(root, &objectid);
10177         if (err)
10178                 goto out_unlock;
10179
10180         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
10181                                 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)),
10182                                 objectid, S_IFLNK|S_IRWXUGO, &index);
10183         if (IS_ERR(inode)) {
10184                 err = PTR_ERR(inode);
10185                 goto out_unlock;
10186         }
10187
10188         /*
10189          * If the active LSM wants to access the inode during
10190          * d_instantiate it needs these. Smack checks to see
10191          * if the filesystem supports xattrs by looking at the
10192          * ops vector.
10193          */
10194         inode->i_fop = &btrfs_file_operations;
10195         inode->i_op = &btrfs_file_inode_operations;
10196         inode->i_mapping->a_ops = &btrfs_aops;
10197         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
10198
10199         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
10200         if (err)
10201                 goto out_unlock_inode;
10202
10203         path = btrfs_alloc_path();
10204         if (!path) {
10205                 err = -ENOMEM;
10206                 goto out_unlock_inode;
10207         }
10208         key.objectid = btrfs_ino(BTRFS_I(inode));
10209         key.offset = 0;
10210         key.type = BTRFS_EXTENT_DATA_KEY;
10211         datasize = btrfs_file_extent_calc_inline_size(name_len);
10212         err = btrfs_insert_empty_item(trans, root, path, &key,
10213                                       datasize);
10214         if (err) {
10215                 btrfs_free_path(path);
10216                 goto out_unlock_inode;
10217         }
10218         leaf = path->nodes[0];
10219         ei = btrfs_item_ptr(leaf, path->slots[0],
10220                             struct btrfs_file_extent_item);
10221         btrfs_set_file_extent_generation(leaf, ei, trans->transid);
10222         btrfs_set_file_extent_type(leaf, ei,
10223                                    BTRFS_FILE_EXTENT_INLINE);
10224         btrfs_set_file_extent_encryption(leaf, ei, 0);
10225         btrfs_set_file_extent_compression(leaf, ei, 0);
10226         btrfs_set_file_extent_other_encoding(leaf, ei, 0);
10227         btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
10228
10229         ptr = btrfs_file_extent_inline_start(ei);
10230         write_extent_buffer(leaf, symname, ptr, name_len);
10231         btrfs_mark_buffer_dirty(leaf);
10232         btrfs_free_path(path);
10233
10234         inode->i_op = &btrfs_symlink_inode_operations;
10235         inode_nohighmem(inode);
10236         inode->i_mapping->a_ops = &btrfs_symlink_aops;
10237         inode_set_bytes(inode, name_len);
10238         btrfs_i_size_write(BTRFS_I(inode), name_len);
10239         err = btrfs_update_inode(trans, root, inode);
10240         /*
10241          * Last step, add directory indexes for our symlink inode. This is the
10242          * last step to avoid extra cleanup of these indexes if an error happens
10243          * elsewhere above.
10244          */
10245         if (!err)
10246                 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
10247                                 BTRFS_I(inode), 0, index);
10248         if (err) {
10249                 drop_inode = 1;
10250                 goto out_unlock_inode;
10251         }
10252
10253         unlock_new_inode(inode);
10254         d_instantiate(dentry, inode);
10255
10256 out_unlock:
10257         btrfs_end_transaction(trans);
10258         if (drop_inode) {
10259                 inode_dec_link_count(inode);
10260                 iput(inode);
10261         }
10262         btrfs_btree_balance_dirty(fs_info);
10263         return err;
10264
10265 out_unlock_inode:
10266         drop_inode = 1;
10267         unlock_new_inode(inode);
10268         goto out_unlock;
10269 }
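
As a quick illustration of the inline storage above: the size a symlink
reports to userspace is just the target length set by
btrfs_i_size_write(BTRFS_I(inode), name_len). A sketch, not part of
inode.c; "target-file" and "mylink" are placeholders:

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        struct stat st;

        if (symlink("target-file", "mylink") != 0)
                perror("symlink");
        /* st_size equals strlen("target-file"), i.e. name_len above. */
        if (lstat("mylink", &st) == 0)
                printf("link size: %lld\n", (long long)st.st_size);
        return 0;
}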
10270
10271 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
10272                                        u64 start, u64 num_bytes, u64 min_size,
10273                                        loff_t actual_len, u64 *alloc_hint,
10274                                        struct btrfs_trans_handle *trans)
10275 {
10276         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
10277         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
10278         struct extent_map *em;
10279         struct btrfs_root *root = BTRFS_I(inode)->root;
10280         struct btrfs_key ins;
10281         u64 cur_offset = start;
10282         u64 i_size;
10283         u64 cur_bytes;
10284         u64 last_alloc = (u64)-1;
10285         int ret = 0;
10286         bool own_trans = true;
10287         u64 end = start + num_bytes - 1;
10288
10289         if (trans)
10290                 own_trans = false;
10291         while (num_bytes > 0) {
10292                 if (own_trans) {
10293                         trans = btrfs_start_transaction(root, 3);
10294                         if (IS_ERR(trans)) {
10295                                 ret = PTR_ERR(trans);
10296                                 break;
10297                         }
10298                 }
10299
10300                 cur_bytes = min_t(u64, num_bytes, SZ_256M);
10301                 cur_bytes = max(cur_bytes, min_size);
10302                 /*
10303                  * If we are severely fragmented we could end up with really
10304                  * small allocations, so if the allocator is returning small
10305                  * chunks let's make its job easier by only searching for
10306                  * chunks of that size.
10307                  */
10308                 cur_bytes = min(cur_bytes, last_alloc);
10309                 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
10310                                 min_size, 0, *alloc_hint, &ins, 1, 0);
10311                 if (ret) {
10312                         if (own_trans)
10313                                 btrfs_end_transaction(trans);
10314                         break;
10315                 }
10316                 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10317
10318                 last_alloc = ins.offset;
10319                 ret = insert_reserved_file_extent(trans, inode,
10320                                                   cur_offset, ins.objectid,
10321                                                   ins.offset, ins.offset,
10322                                                   ins.offset, 0, 0, 0,
10323                                                   BTRFS_FILE_EXTENT_PREALLOC);
10324                 if (ret) {
10325                         btrfs_free_reserved_extent(fs_info, ins.objectid,
10326                                                    ins.offset, 0);
10327                         btrfs_abort_transaction(trans, ret);
10328                         if (own_trans)
10329                                 btrfs_end_transaction(trans);
10330                         break;
10331                 }
10332
10333                 btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10334                                         cur_offset + ins.offset - 1, 0);
10335
10336                 em = alloc_extent_map();
10337                 if (!em) {
10338                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
10339                                 &BTRFS_I(inode)->runtime_flags);
10340                         goto next;
10341                 }
10342
10343                 em->start = cur_offset;
10344                 em->orig_start = cur_offset;
10345                 em->len = ins.offset;
10346                 em->block_start = ins.objectid;
10347                 em->block_len = ins.offset;
10348                 em->orig_block_len = ins.offset;
10349                 em->ram_bytes = ins.offset;
10350                 em->bdev = fs_info->fs_devices->latest_bdev;
10351                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
10352                 em->generation = trans->transid;
10353
10354                 while (1) {
10355                         write_lock(&em_tree->lock);
10356                         ret = add_extent_mapping(em_tree, em, 1);
10357                         write_unlock(&em_tree->lock);
10358                         if (ret != -EEXIST)
10359                                 break;
10360                         btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10361                                                 cur_offset + ins.offset - 1,
10362                                                 0);
10363                 }
10364                 free_extent_map(em);
10365 next:
10366                 num_bytes -= ins.offset;
10367                 cur_offset += ins.offset;
10368                 *alloc_hint = ins.objectid + ins.offset;
10369
10370                 inode_inc_iversion(inode);
10371                 inode->i_ctime = current_time(inode);
10372                 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
10373                 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
10374                     (actual_len > inode->i_size) &&
10375                     (cur_offset > inode->i_size)) {
10376                         if (cur_offset > actual_len)
10377                                 i_size = actual_len;
10378                         else
10379                                 i_size = cur_offset;
10380                         i_size_write(inode, i_size);
10381                         btrfs_ordered_update_i_size(inode, i_size, NULL);
10382                 }
10383
10384                 ret = btrfs_update_inode(trans, root, inode);
10385
10386                 if (ret) {
10387                         btrfs_abort_transaction(trans, ret);
10388                         if (own_trans)
10389                                 btrfs_end_transaction(trans);
10390                         break;
10391                 }
10392
10393                 if (own_trans)
10394                         btrfs_end_transaction(trans);
10395         }
10396         if (cur_offset < end)
10397                 btrfs_free_reserved_data_space(inode, cur_offset,
10398                         end - cur_offset + 1);
10399         return ret;
10400 }
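
A short userspace sketch of how this path is typically reached, not part
of inode.c and with the file name as a placeholder: fallocate() with
FALLOC_FL_KEEP_SIZE preallocates extents without moving i_size, which is
why the i_size update above is conditional on the mode.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("prealloc-file", O_CREAT | O_RDWR, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Preallocate 16 MiB without changing the visible file size. */
        if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20) != 0)
                perror("fallocate");
        close(fd);
        return 0;
}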
10401
10402 int btrfs_prealloc_file_range(struct inode *inode, int mode,
10403                               u64 start, u64 num_bytes, u64 min_size,
10404                               loff_t actual_len, u64 *alloc_hint)
10405 {
10406         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10407                                            min_size, actual_len, alloc_hint,
10408                                            NULL);
10409 }
10410
10411 int btrfs_prealloc_file_range_trans(struct inode *inode,
10412                                     struct btrfs_trans_handle *trans, int mode,
10413                                     u64 start, u64 num_bytes, u64 min_size,
10414                                     loff_t actual_len, u64 *alloc_hint)
10415 {
10416         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10417                                            min_size, actual_len, alloc_hint, trans);
10418 }
10419
10420 static int btrfs_set_page_dirty(struct page *page)
10421 {
10422         return __set_page_dirty_nobuffers(page);
10423 }
10424
10425 static int btrfs_permission(struct inode *inode, int mask)
10426 {
10427         struct btrfs_root *root = BTRFS_I(inode)->root;
10428         umode_t mode = inode->i_mode;
10429
10430         if (mask & MAY_WRITE &&
10431             (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
10432                 if (btrfs_root_readonly(root))
10433                         return -EROFS;
10434                 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
10435                         return -EACCES;
10436         }
10437         return generic_permission(inode, mask);
10438 }
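
A hedged illustration of the btrfs_root_readonly() branch above, not part
of inode.c: assuming a subvolume was previously marked read-only (for
example with `btrfs property set <path> ro true`), opening one of its
files for writing should fail with EROFS. The path is a placeholder.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        int fd = open("/mnt/ro-subvol/file", O_WRONLY);

        if (fd < 0 && errno == EROFS)
                printf("write denied: subvolume is read-only\n");
        return 0;
}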
10439
10440 static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
10441 {
10442         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
10443         struct btrfs_trans_handle *trans;
10444         struct btrfs_root *root = BTRFS_I(dir)->root;
10445         struct inode *inode = NULL;
10446         u64 objectid;
10447         u64 index;
10448         int ret = 0;
10449
10450         /*
10451          * 5 units required for adding orphan entry
10452          */
10453         trans = btrfs_start_transaction(root, 5);
10454         if (IS_ERR(trans))
10455                 return PTR_ERR(trans);
10456
10457         ret = btrfs_find_free_ino(root, &objectid);
10458         if (ret)
10459                 goto out;
10460
10461         inode = btrfs_new_inode(trans, root, dir, NULL, 0,
10462                         btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
10463         if (IS_ERR(inode)) {
10464                 ret = PTR_ERR(inode);
10465                 inode = NULL;
10466                 goto out;
10467         }
10468
10469         inode->i_fop = &btrfs_file_operations;
10470         inode->i_op = &btrfs_file_inode_operations;
10471
10472         inode->i_mapping->a_ops = &btrfs_aops;
10473         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
10474
10475         ret = btrfs_init_inode_security(trans, inode, dir, NULL);
10476         if (ret)
10477                 goto out_inode;
10478
10479         ret = btrfs_update_inode(trans, root, inode);
10480         if (ret)
10481                 goto out_inode;
10482         ret = btrfs_orphan_add(trans, BTRFS_I(inode));
10483         if (ret)
10484                 goto out_inode;
10485
10486         /*
10487          * We set number of links to 0 in btrfs_new_inode(), and here we set
10488          * it to 1 because d_tmpfile() will issue a warning if the count is 0,
10489          * through:
10490          *
10491          *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
10492          */
10493         set_nlink(inode, 1);
10494         unlock_new_inode(inode);
10495         d_tmpfile(dentry, inode);
10496         mark_inode_dirty(inode);
10497
10498 out:
10499         btrfs_end_transaction(trans);
10500         if (ret)
10501                 iput(inode);
10502         btrfs_balance_delayed_items(fs_info);
10503         btrfs_btree_balance_dirty(fs_info);
10504         return ret;
10505
10506 out_inode:
10507         unlock_new_inode(inode);
10508         goto out;
10509
10510 }
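
For context, a minimal userspace sketch of the O_TMPFILE path that lands
here; it is not part of inode.c, the mount point is a placeholder, and
linkat() with AT_EMPTY_PATH needs CAP_DAC_READ_SEARCH.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Unnamed inode: the orphan item added above cleans it up if it
         * is never linked in. */
        int fd = open("/mnt/btrfs", O_TMPFILE | O_RDWR, 0600);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "scratch", 7) != 7)
                perror("write");
        /* Optionally give it a name; otherwise it vanishes on close. */
        if (linkat(fd, "", AT_FDCWD, "/mnt/btrfs/file", AT_EMPTY_PATH) != 0)
                perror("linkat");
        close(fd);
        return 0;
}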
10511
10512 static const struct inode_operations btrfs_dir_inode_operations = {
10513         .getattr        = btrfs_getattr,
10514         .lookup         = btrfs_lookup,
10515         .create         = btrfs_create,
10516         .unlink         = btrfs_unlink,
10517         .link           = btrfs_link,
10518         .mkdir          = btrfs_mkdir,
10519         .rmdir          = btrfs_rmdir,
10520         .rename         = btrfs_rename2,
10521         .symlink        = btrfs_symlink,
10522         .setattr        = btrfs_setattr,
10523         .mknod          = btrfs_mknod,
10524         .listxattr      = btrfs_listxattr,
10525         .permission     = btrfs_permission,
10526         .get_acl        = btrfs_get_acl,
10527         .set_acl        = btrfs_set_acl,
10528         .update_time    = btrfs_update_time,
10529         .tmpfile        = btrfs_tmpfile,
10530 };
10531 static const struct inode_operations btrfs_dir_ro_inode_operations = {
10532         .lookup         = btrfs_lookup,
10533         .permission     = btrfs_permission,
10534         .update_time    = btrfs_update_time,
10535 };
10536
10537 static const struct file_operations btrfs_dir_file_operations = {
10538         .llseek         = generic_file_llseek,
10539         .read           = generic_read_dir,
10540         .iterate_shared = btrfs_real_readdir,
10541         .unlocked_ioctl = btrfs_ioctl,
10542 #ifdef CONFIG_COMPAT
10543         .compat_ioctl   = btrfs_compat_ioctl,
10544 #endif
10545         .release        = btrfs_release_file,
10546         .fsync          = btrfs_sync_file,
10547 };
10548
10549 static const struct extent_io_ops btrfs_extent_io_ops = {
10550         .fill_delalloc = run_delalloc_range,
10551         .submit_bio_hook = btrfs_submit_bio_hook,
10552         .merge_bio_hook = btrfs_merge_bio_hook,
10553         .readpage_end_io_hook = btrfs_readpage_end_io_hook,
10554         .writepage_end_io_hook = btrfs_writepage_end_io_hook,
10555         .writepage_start_hook = btrfs_writepage_start_hook,
10556         .set_bit_hook = btrfs_set_bit_hook,
10557         .clear_bit_hook = btrfs_clear_bit_hook,
10558         .merge_extent_hook = btrfs_merge_extent_hook,
10559         .split_extent_hook = btrfs_split_extent_hook,
10560 };
10561
10562 /*
10563  * btrfs doesn't support the bmap operation because swapfiles
10564  * use bmap to make a mapping of extents in the file.  They assume
10565  * these extents won't change over the life of the file and they
10566  * use the bmap result to do IO directly to the drive.
10567  *
10568  * the btrfs bmap call would return logical addresses that aren't
10569  * suitable for IO and they also will change frequently as COW
10570  * operations happen.  So, swapfile + btrfs == corruption.
10571  *
10572  * For now we're avoiding this by dropping bmap.
10573  */
10574 static const struct address_space_operations btrfs_aops = {
10575         .readpage       = btrfs_readpage,
10576         .writepage      = btrfs_writepage,
10577         .writepages     = btrfs_writepages,
10578         .readpages      = btrfs_readpages,
10579         .direct_IO      = btrfs_direct_IO,
10580         .invalidatepage = btrfs_invalidatepage,
10581         .releasepage    = btrfs_releasepage,
10582         .set_page_dirty = btrfs_set_page_dirty,
10583         .error_remove_page = generic_error_remove_page,
10584 };
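
To make the comment above concrete, a userspace probe, not part of
inode.c: FIBMAP depends on ->bmap, so with btrfs_aops omitting it the
ioctl is expected to fail (typically with -EINVAL; it also requires
CAP_SYS_RAWIO).

#include <fcntl.h>
#include <linux/fs.h>           /* FIBMAP */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        int fd, block = 0;

        if (argc < 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* No ->bmap in btrfs_aops above, so this should fail on btrfs. */
        if (ioctl(fd, FIBMAP, &block) != 0)
                perror("FIBMAP");
        else
                printf("block 0 -> %d\n", block);
        close(fd);
        return 0;
}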
10585
10586 static const struct address_space_operations btrfs_symlink_aops = {
10587         .readpage       = btrfs_readpage,
10588         .writepage      = btrfs_writepage,
10589         .invalidatepage = btrfs_invalidatepage,
10590         .releasepage    = btrfs_releasepage,
10591 };
10592
10593 static const struct inode_operations btrfs_file_inode_operations = {
10594         .getattr        = btrfs_getattr,
10595         .setattr        = btrfs_setattr,
10596         .listxattr      = btrfs_listxattr,
10597         .permission     = btrfs_permission,
10598         .fiemap         = btrfs_fiemap,
10599         .get_acl        = btrfs_get_acl,
10600         .set_acl        = btrfs_set_acl,
10601         .update_time    = btrfs_update_time,
10602 };
10603 static const struct inode_operations btrfs_special_inode_operations = {
10604         .getattr        = btrfs_getattr,
10605         .setattr        = btrfs_setattr,
10606         .permission     = btrfs_permission,
10607         .listxattr      = btrfs_listxattr,
10608         .get_acl        = btrfs_get_acl,
10609         .set_acl        = btrfs_set_acl,
10610         .update_time    = btrfs_update_time,
10611 };
10612 static const struct inode_operations btrfs_symlink_inode_operations = {
10613         .get_link       = page_get_link,
10614         .getattr        = btrfs_getattr,
10615         .setattr        = btrfs_setattr,
10616         .permission     = btrfs_permission,
10617         .listxattr      = btrfs_listxattr,
10618         .update_time    = btrfs_update_time,
10619 };
10620
10621 const struct dentry_operations btrfs_dentry_operations = {
10622         .d_delete       = btrfs_dentry_delete,
10623         .d_release      = btrfs_dentry_release,
10624 };