2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mpage.h>
31 #include <linux/swap.h>
32 #include <linux/writeback.h>
33 #include <linux/statfs.h>
34 #include <linux/compat.h>
35 #include <linux/bit_spinlock.h>
36 #include <linux/xattr.h>
37 #include <linux/posix_acl.h>
38 #include <linux/falloc.h>
42 #include "transaction.h"
43 #include "btrfs_inode.h"
45 #include "print-tree.h"
47 #include "ordered-data.h"
50 #include "compression.h"
53 struct btrfs_iget_args {
55 struct btrfs_root *root;
58 static struct inode_operations btrfs_dir_inode_operations;
59 static struct inode_operations btrfs_symlink_inode_operations;
60 static struct inode_operations btrfs_dir_ro_inode_operations;
61 static struct inode_operations btrfs_special_inode_operations;
62 static struct inode_operations btrfs_file_inode_operations;
63 static struct address_space_operations btrfs_aops;
64 static struct address_space_operations btrfs_symlink_aops;
65 static struct file_operations btrfs_dir_file_operations;
66 static struct extent_io_ops btrfs_extent_io_ops;
68 static struct kmem_cache *btrfs_inode_cachep;
69 struct kmem_cache *btrfs_trans_handle_cachep;
70 struct kmem_cache *btrfs_transaction_cachep;
71 struct kmem_cache *btrfs_path_cachep;
74 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
75 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
76 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
77 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
78 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
79 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
80 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
81 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
84 static void btrfs_truncate(struct inode *inode);
85 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
86 static noinline int cow_file_range(struct inode *inode,
87 struct page *locked_page,
88 u64 start, u64 end, int *page_started,
89 unsigned long *nr_written, int unlock);
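
/*
 * A minimal sketch of the kind of helper that consumes the
 * btrfs_type_by_mode[] table above; the helper name here is purely
 * illustrative, callers can just as well index the array directly
 * with (mode & S_IFMT) >> S_SHIFT.
 */
static inline u8 btrfs_dirent_type_for_mode(umode_t mode)
{
	return btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}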
91 static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
95 err = btrfs_init_acl(inode, dir);
97 err = btrfs_xattr_security_init(inode, dir);
102 * this does all the hard work for inserting an inline extent into
103 * the btree. The caller should have done a btrfs_drop_extents so that
104 * no overlapping inline items exist in the btree
106 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
107 struct btrfs_root *root, struct inode *inode,
108 u64 start, size_t size, size_t compressed_size,
109 struct page **compressed_pages)
111 struct btrfs_key key;
112 struct btrfs_path *path;
113 struct extent_buffer *leaf;
114 struct page *page = NULL;
117 struct btrfs_file_extent_item *ei;
120 size_t cur_size = size;
122 unsigned long offset;
123 int use_compress = 0;
125 if (compressed_size && compressed_pages) {
127 cur_size = compressed_size;
130 path = btrfs_alloc_path();
134 path->leave_spinning = 1;
135 btrfs_set_trans_block_group(trans, inode);
137 key.objectid = inode->i_ino;
139 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
140 datasize = btrfs_file_extent_calc_inline_size(cur_size);
142 inode_add_bytes(inode, size);
143 ret = btrfs_insert_empty_item(trans, root, path, &key,
150 leaf = path->nodes[0];
151 ei = btrfs_item_ptr(leaf, path->slots[0],
152 struct btrfs_file_extent_item);
153 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
154 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
155 btrfs_set_file_extent_encryption(leaf, ei, 0);
156 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
157 btrfs_set_file_extent_ram_bytes(leaf, ei, size);
158 ptr = btrfs_file_extent_inline_start(ei);
163 while (compressed_size > 0) {
164 cpage = compressed_pages[i];
165 cur_size = min_t(unsigned long, compressed_size,
168 kaddr = kmap_atomic(cpage, KM_USER0);
169 write_extent_buffer(leaf, kaddr, ptr, cur_size);
170 kunmap_atomic(kaddr, KM_USER0);
174 compressed_size -= cur_size;
176 btrfs_set_file_extent_compression(leaf, ei,
177 BTRFS_COMPRESS_ZLIB);
179 page = find_get_page(inode->i_mapping,
180 start >> PAGE_CACHE_SHIFT);
181 btrfs_set_file_extent_compression(leaf, ei, 0);
182 kaddr = kmap_atomic(page, KM_USER0);
183 offset = start & (PAGE_CACHE_SIZE - 1);
184 write_extent_buffer(leaf, kaddr + offset, ptr, size);
185 kunmap_atomic(kaddr, KM_USER0);
186 page_cache_release(page);
188 btrfs_mark_buffer_dirty(leaf);
189 btrfs_free_path(path);
191 BTRFS_I(inode)->disk_i_size = inode->i_size;
192 btrfs_update_inode(trans, root, inode);
195 btrfs_free_path(path);
201 * conditionally insert an inline extent into the file. This
202 * does the checks required to make sure the data is small enough
203 * to fit as an inline extent.
205 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
206 struct btrfs_root *root,
207 struct inode *inode, u64 start, u64 end,
208 size_t compressed_size,
209 struct page **compressed_pages)
211 u64 isize = i_size_read(inode);
212 u64 actual_end = min(end + 1, isize);
213 u64 inline_len = actual_end - start;
214 u64 aligned_end = (end + root->sectorsize - 1) &
215 ~((u64)root->sectorsize - 1);
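	/*
	 * worked example, assuming a 4K sectorsize: end = 6143 gives
	 * aligned_end = (6143 + 4095) & ~4095 = 8192, i.e. the range is
	 * rounded up to a whole number of sectors before dropping extents
	 */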
217 u64 data_len = inline_len;
221 data_len = compressed_size;
224 actual_end >= PAGE_CACHE_SIZE ||
225 data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
227 (actual_end & (root->sectorsize - 1)) == 0) ||
229 data_len > root->fs_info->max_inline) {
233 ret = btrfs_drop_extents(trans, root, inode, start,
234 aligned_end, aligned_end, start,
238 if (isize > actual_end)
239 inline_len = min_t(u64, isize, actual_end);
240 ret = insert_inline_extent(trans, root, inode, start,
241 inline_len, compressed_size,
244 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
248 struct async_extent {
253 unsigned long nr_pages;
254 struct list_head list;
259 struct btrfs_root *root;
260 struct page *locked_page;
263 struct list_head extents;
264 struct btrfs_work work;
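
/*
 * A sketch of the two structures above, inferred from how their fields
 * are used by add_async_extent() and the async_cow helpers below (take
 * this as illustrative rather than the authoritative declarations):
 *
 *	struct async_extent {
 *		u64 start;
 *		u64 ram_size;
 *		u64 compressed_size;
 *		struct page **pages;
 *		unsigned long nr_pages;
 *		struct list_head list;
 *	};
 *
 *	struct async_cow {
 *		struct inode *inode;
 *		struct btrfs_root *root;
 *		struct page *locked_page;
 *		u64 start;
 *		u64 end;
 *		struct list_head extents;
 *		struct btrfs_work work;
 *	};
 */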
267 static noinline int add_async_extent(struct async_cow *cow,
268 u64 start, u64 ram_size,
271 unsigned long nr_pages)
273 struct async_extent *async_extent;
275 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
276 async_extent->start = start;
277 async_extent->ram_size = ram_size;
278 async_extent->compressed_size = compressed_size;
279 async_extent->pages = pages;
280 async_extent->nr_pages = nr_pages;
281 list_add_tail(&async_extent->list, &cow->extents);
286 * we create compressed extents in two phases. The first
287 * phase compresses a range of pages that have already been
288 * locked (both pages and state bits are locked).
290 * This is done inside an ordered work queue, and the compression
291 * is spread across many cpus. The actual IO submission is step
292 * two, and the ordered work queue takes care of making sure that
293 * happens in the same order things were put onto the queue by
294 * writepages and friends.
296 * If this code finds it can't get good compression, it puts an
297 * entry onto the work queue to write the uncompressed bytes. This
298 * makes sure that both compressed inodes and uncompressed inodes
299 * are written in the same order that pdflush sent them down.
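 *
 * In terms of the functions below: run_delalloc_range() picks
 * cow_file_range_async() for the compressed case, which queues async_cow
 * work items; async_cow_start() runs phase one (compress_file_range) and
 * async_cow_submit() runs phase two (submit_compressed_extents).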
301 static noinline int compress_file_range(struct inode *inode,
302 struct page *locked_page,
304 struct async_cow *async_cow,
307 struct btrfs_root *root = BTRFS_I(inode)->root;
308 struct btrfs_trans_handle *trans;
312 u64 blocksize = root->sectorsize;
314 u64 isize = i_size_read(inode);
316 struct page **pages = NULL;
317 unsigned long nr_pages;
318 unsigned long nr_pages_ret = 0;
319 unsigned long total_compressed = 0;
320 unsigned long total_in = 0;
321 unsigned long max_compressed = 128 * 1024;
322 unsigned long max_uncompressed = 128 * 1024;
328 actual_end = min_t(u64, isize, end + 1);
331 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
332 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
335 * we don't want to send crud past the end of i_size through
336 * compression, that's just a waste of CPU time. So, if the
337 * end of the file is before the start of our current
338 * requested range of bytes, we bail out to the uncompressed
339 * cleanup code that can deal with all of this.
341 * It isn't really the fastest way to fix things, but this is a
342 * very uncommon corner.
344 if (actual_end <= start)
345 goto cleanup_and_bail_uncompressed;
347 total_compressed = actual_end - start;
349 /* we want to make sure that amount of ram required to uncompress
350 * an extent is reasonable, so we limit the total size in ram
351 * of a compressed extent to 128k. This is a crucial number
352 * because it also controls how easily we can spread reads across
353 * cpus for decompression.
355 * We also want to make sure the amount of IO required to do
356 * a random read is reasonably small, so we limit the size of
357 * a compressed extent to 128k.
359 total_compressed = min(total_compressed, max_uncompressed);
360 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
361 num_bytes = max(blocksize, num_bytes);
362 disk_num_bytes = num_bytes;
367 * we do compression for mount -o compress and when the
368 * inode has not been flagged as nocompress. This flag can
369 * change at any time if we discover bad compression ratios.
371 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
372 btrfs_test_opt(root, COMPRESS)) {
374 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
376 ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
377 total_compressed, pages,
378 nr_pages, &nr_pages_ret,
384 unsigned long offset = total_compressed &
385 (PAGE_CACHE_SIZE - 1);
386 struct page *page = pages[nr_pages_ret - 1];
389 /* zero the tail end of the last page, we might be
390 * sending it down to disk
393 kaddr = kmap_atomic(page, KM_USER0);
394 memset(kaddr + offset, 0,
395 PAGE_CACHE_SIZE - offset);
396 kunmap_atomic(kaddr, KM_USER0);
402 trans = btrfs_join_transaction(root, 1);
404 btrfs_set_trans_block_group(trans, inode);
406 /* let's try to make an inline extent */
407 if (ret || total_in < (actual_end - start)) {
408 /* we didn't compress the entire range, try
409 * to make an uncompressed inline extent.
411 ret = cow_file_range_inline(trans, root, inode,
412 start, end, 0, NULL);
414 /* try making a compressed inline extent */
415 ret = cow_file_range_inline(trans, root, inode,
417 total_compressed, pages);
419 btrfs_end_transaction(trans, root);
422 * inline extent creation worked, we don't need
423 * to create any more async work items. Unlock
424 * and free up our temp pages.
426 extent_clear_unlock_delalloc(inode,
427 &BTRFS_I(inode)->io_tree,
429 EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
430 EXTENT_CLEAR_DELALLOC |
431 EXTENT_CLEAR_ACCOUNTING |
432 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
440 * we aren't doing an inline extent, so round the compressed size
441 * up to a block size boundary so the allocator does sane
444 total_compressed = (total_compressed + blocksize - 1) &
448 * one last check to make sure the compression is really a
449 * win, compare the page count read with the blocks on disk
451 total_in = (total_in + PAGE_CACHE_SIZE - 1) &
452 ~(PAGE_CACHE_SIZE - 1);
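	/*
	 * worked example, assuming 4K blocks and 4K pages: 128K of input
	 * (total_in rounds to 131072) that shrinks to 100000 bytes rounds
	 * up to a total_compressed of 102400 and wins; if it only shrinks
	 * to 129000 bytes it rounds back up to 131072 and we give up on
	 * compressing this range.
	 */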
453 if (total_compressed >= total_in) {
456 disk_num_bytes = total_compressed;
457 num_bytes = total_in;
460 if (!will_compress && pages) {
462 * the compression code ran but failed to make things smaller,
463 * free any pages it allocated and our page pointer array
465 for (i = 0; i < nr_pages_ret; i++) {
466 WARN_ON(pages[i]->mapping);
467 page_cache_release(pages[i]);
471 total_compressed = 0;
474 /* flag the file so we don't compress in the future */
475 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
480 /* the async work queues will take care of doing actual
481 * allocation on disk for these compressed pages,
482 * and will submit them to the elevator.
484 add_async_extent(async_cow, start, num_bytes,
485 total_compressed, pages, nr_pages_ret);
487 if (start + num_bytes < end && start + num_bytes < actual_end) {
494 cleanup_and_bail_uncompressed:
496 * No compression, but we still need to write the pages in
497 * the file we've been given so far. redirty the locked
498 * page if it corresponds to our extent and set things up
499 * for the async work queue to run cow_file_range to do
500 * the normal delalloc dance
502 if (page_offset(locked_page) >= start &&
503 page_offset(locked_page) <= end) {
504 __set_page_dirty_nobuffers(locked_page);
505 /* unlocked later on in the async handlers */
507 add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
515 for (i = 0; i < nr_pages_ret; i++) {
516 WARN_ON(pages[i]->mapping);
517 page_cache_release(pages[i]);
525 * phase two of compressed writeback. This is the ordered portion
526 * of the code, which only gets called in the order the work was
527 * queued. We walk all the async extents created by compress_file_range
528 * and send them down to the disk.
530 static noinline int submit_compressed_extents(struct inode *inode,
531 struct async_cow *async_cow)
533 struct async_extent *async_extent;
535 struct btrfs_trans_handle *trans;
536 struct btrfs_key ins;
537 struct extent_map *em;
538 struct btrfs_root *root = BTRFS_I(inode)->root;
539 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
540 struct extent_io_tree *io_tree;
543 if (list_empty(&async_cow->extents))
546 trans = btrfs_join_transaction(root, 1);
548 while (!list_empty(&async_cow->extents)) {
549 async_extent = list_entry(async_cow->extents.next,
550 struct async_extent, list);
551 list_del(&async_extent->list);
553 io_tree = &BTRFS_I(inode)->io_tree;
555 /* did the compression code fall back to uncompressed IO? */
556 if (!async_extent->pages) {
557 int page_started = 0;
558 unsigned long nr_written = 0;
560 lock_extent(io_tree, async_extent->start,
561 async_extent->start +
562 async_extent->ram_size - 1, GFP_NOFS);
564 /* allocate blocks */
565 cow_file_range(inode, async_cow->locked_page,
567 async_extent->start +
568 async_extent->ram_size - 1,
569 &page_started, &nr_written, 0);
572 * if page_started, cow_file_range inserted an
573 * inline extent and took care of all the unlocking
574 * and IO for us. Otherwise, we need to submit
575 * all those pages down to the drive.
578 extent_write_locked_range(io_tree,
579 inode, async_extent->start,
580 async_extent->start +
581 async_extent->ram_size - 1,
589 lock_extent(io_tree, async_extent->start,
590 async_extent->start + async_extent->ram_size - 1,
593 * here we're doing allocation and writeback of the compressed pages
596 btrfs_drop_extent_cache(inode, async_extent->start,
597 async_extent->start +
598 async_extent->ram_size - 1, 0);
600 ret = btrfs_reserve_extent(trans, root,
601 async_extent->compressed_size,
602 async_extent->compressed_size,
606 em = alloc_extent_map(GFP_NOFS);
607 em->start = async_extent->start;
608 em->len = async_extent->ram_size;
609 em->orig_start = em->start;
611 em->block_start = ins.objectid;
612 em->block_len = ins.offset;
613 em->bdev = root->fs_info->fs_devices->latest_bdev;
614 set_bit(EXTENT_FLAG_PINNED, &em->flags);
615 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
618 write_lock(&em_tree->lock);
619 ret = add_extent_mapping(em_tree, em);
620 write_unlock(&em_tree->lock);
621 if (ret != -EEXIST) {
625 btrfs_drop_extent_cache(inode, async_extent->start,
626 async_extent->start +
627 async_extent->ram_size - 1, 0);
630 ret = btrfs_add_ordered_extent(inode, async_extent->start,
632 async_extent->ram_size,
634 BTRFS_ORDERED_COMPRESSED);
637 btrfs_end_transaction(trans, root);
640 * clear dirty, set writeback and unlock the pages.
642 extent_clear_unlock_delalloc(inode,
643 &BTRFS_I(inode)->io_tree,
645 async_extent->start +
646 async_extent->ram_size - 1,
647 NULL, EXTENT_CLEAR_UNLOCK_PAGE |
648 EXTENT_CLEAR_UNLOCK |
649 EXTENT_CLEAR_DELALLOC |
650 EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
652 ret = btrfs_submit_compressed_write(inode,
654 async_extent->ram_size,
656 ins.offset, async_extent->pages,
657 async_extent->nr_pages);
660 trans = btrfs_join_transaction(root, 1);
661 alloc_hint = ins.objectid + ins.offset;
666 btrfs_end_transaction(trans, root);
671 * when extent_io.c finds a delayed allocation range in the file,
672 * the callbacks end up in this code. The basic idea is to
673 * allocate extents on disk for the range, and create ordered data structs
674 * in ram to track those extents.
676 * locked_page is the page that writepage had locked already. We use
677 * it to make sure we don't do extra locks or unlocks.
679 * *page_started is set to one if we unlock locked_page and do everything
680 * required to start IO on it. It may be clean and already done with IO when we return.
683 static noinline int cow_file_range(struct inode *inode,
684 struct page *locked_page,
685 u64 start, u64 end, int *page_started,
686 unsigned long *nr_written,
689 struct btrfs_root *root = BTRFS_I(inode)->root;
690 struct btrfs_trans_handle *trans;
693 unsigned long ram_size;
696 u64 blocksize = root->sectorsize;
698 u64 isize = i_size_read(inode);
699 struct btrfs_key ins;
700 struct extent_map *em;
701 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
704 trans = btrfs_join_transaction(root, 1);
706 btrfs_set_trans_block_group(trans, inode);
708 actual_end = min_t(u64, isize, end + 1);
710 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
711 num_bytes = max(blocksize, num_bytes);
712 disk_num_bytes = num_bytes;
716 /* let's try to make an inline extent */
717 ret = cow_file_range_inline(trans, root, inode,
718 start, end, 0, NULL);
720 extent_clear_unlock_delalloc(inode,
721 &BTRFS_I(inode)->io_tree,
723 EXTENT_CLEAR_UNLOCK_PAGE |
724 EXTENT_CLEAR_UNLOCK |
725 EXTENT_CLEAR_DELALLOC |
726 EXTENT_CLEAR_ACCOUNTING |
728 EXTENT_SET_WRITEBACK |
729 EXTENT_END_WRITEBACK);
730 *nr_written = *nr_written +
731 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
738 BUG_ON(disk_num_bytes >
739 btrfs_super_total_bytes(&root->fs_info->super_copy));
742 read_lock(&BTRFS_I(inode)->extent_tree.lock);
743 em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
746 alloc_hint = em->block_start;
749 read_unlock(&BTRFS_I(inode)->extent_tree.lock);
750 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
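	/*
	 * each pass of the loop below reserves at most max_extent worth of
	 * disk space, inserts a pinned extent_map for the new chunk,
	 * records an ordered extent for it, clears/unlocks the pages that
	 * are now covered, and then advances start and alloc_hint to the
	 * next chunk.
	 */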
752 while (disk_num_bytes > 0) {
755 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
756 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
757 root->sectorsize, 0, alloc_hint,
761 em = alloc_extent_map(GFP_NOFS);
763 em->orig_start = em->start;
764 ram_size = ins.offset;
765 em->len = ins.offset;
767 em->block_start = ins.objectid;
768 em->block_len = ins.offset;
769 em->bdev = root->fs_info->fs_devices->latest_bdev;
770 set_bit(EXTENT_FLAG_PINNED, &em->flags);
773 write_lock(&em_tree->lock);
774 ret = add_extent_mapping(em_tree, em);
775 write_unlock(&em_tree->lock);
776 if (ret != -EEXIST) {
780 btrfs_drop_extent_cache(inode, start,
781 start + ram_size - 1, 0);
784 cur_alloc_size = ins.offset;
785 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
786 ram_size, cur_alloc_size, 0);
789 if (root->root_key.objectid ==
790 BTRFS_DATA_RELOC_TREE_OBJECTID) {
791 ret = btrfs_reloc_clone_csums(inode, start,
796 if (disk_num_bytes < cur_alloc_size)
799 /* we're not doing compressed IO, don't unlock the first
800 * page (which the caller expects to stay locked), don't
801 * clear any dirty bits and don't set any writeback bits
803 * Do set the Private2 bit so we know this page was properly
804 * set up for writepage
806 op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
807 op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
810 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
811 start, start + ram_size - 1,
813 disk_num_bytes -= cur_alloc_size;
814 num_bytes -= cur_alloc_size;
815 alloc_hint = ins.objectid + ins.offset;
816 start += cur_alloc_size;
820 btrfs_end_transaction(trans, root);
826 * work queue callback to start compression on a file and its pages
828 static noinline void async_cow_start(struct btrfs_work *work)
830 struct async_cow *async_cow;
832 async_cow = container_of(work, struct async_cow, work);
834 compress_file_range(async_cow->inode, async_cow->locked_page,
835 async_cow->start, async_cow->end, async_cow,
838 async_cow->inode = NULL;
842 * work queue callback to submit previously compressed pages
844 static noinline void async_cow_submit(struct btrfs_work *work)
846 struct async_cow *async_cow;
847 struct btrfs_root *root;
848 unsigned long nr_pages;
850 async_cow = container_of(work, struct async_cow, work);
852 root = async_cow->root;
853 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
856 atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
858 if (atomic_read(&root->fs_info->async_delalloc_pages) <
860 waitqueue_active(&root->fs_info->async_submit_wait))
861 wake_up(&root->fs_info->async_submit_wait);
863 if (async_cow->inode)
864 submit_compressed_extents(async_cow->inode, async_cow);
867 static noinline void async_cow_free(struct btrfs_work *work)
869 struct async_cow *async_cow;
870 async_cow = container_of(work, struct async_cow, work);
874 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
875 u64 start, u64 end, int *page_started,
876 unsigned long *nr_written)
878 struct async_cow *async_cow;
879 struct btrfs_root *root = BTRFS_I(inode)->root;
880 unsigned long nr_pages;
882 int limit = 10 * 1024 * 1024;
884 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
885 1, 0, NULL, GFP_NOFS);
886 while (start < end) {
887 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
888 async_cow->inode = inode;
889 async_cow->root = root;
890 async_cow->locked_page = locked_page;
891 async_cow->start = start;
893 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
896 cur_end = min(end, start + 512 * 1024 - 1);
898 async_cow->end = cur_end;
899 INIT_LIST_HEAD(&async_cow->extents);
901 async_cow->work.func = async_cow_start;
902 async_cow->work.ordered_func = async_cow_submit;
903 async_cow->work.ordered_free = async_cow_free;
904 async_cow->work.flags = 0;
906 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
908 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
910 btrfs_queue_worker(&root->fs_info->delalloc_workers,
913 if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
914 wait_event(root->fs_info->async_submit_wait,
915 (atomic_read(&root->fs_info->async_delalloc_pages) <
919 while (atomic_read(&root->fs_info->async_submit_draining) &&
920 atomic_read(&root->fs_info->async_delalloc_pages)) {
921 wait_event(root->fs_info->async_submit_wait,
922 (atomic_read(&root->fs_info->async_delalloc_pages) ==
926 *nr_written += nr_pages;
933 static noinline int csum_exist_in_range(struct btrfs_root *root,
934 u64 bytenr, u64 num_bytes)
937 struct btrfs_ordered_sum *sums;
940 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
941 bytenr + num_bytes - 1, &list);
942 if (ret == 0 && list_empty(&list))
945 while (!list_empty(&list)) {
946 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
947 list_del(&sums->list);
954 * when the nocow writeback callback runs, this checks for snapshots or COW copies
955 * of the extents that exist in the file, and COWs the file as required.
957 * If no cow copies or snapshots exist, we write directly to the existing
960 static noinline int run_delalloc_nocow(struct inode *inode,
961 struct page *locked_page,
962 u64 start, u64 end, int *page_started, int force,
963 unsigned long *nr_written)
965 struct btrfs_root *root = BTRFS_I(inode)->root;
966 struct btrfs_trans_handle *trans;
967 struct extent_buffer *leaf;
968 struct btrfs_path *path;
969 struct btrfs_file_extent_item *fi;
970 struct btrfs_key found_key;
983 path = btrfs_alloc_path();
985 trans = btrfs_join_transaction(root, 1);
991 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
994 if (ret > 0 && path->slots[0] > 0 && check_prev) {
995 leaf = path->nodes[0];
996 btrfs_item_key_to_cpu(leaf, &found_key,
998 if (found_key.objectid == inode->i_ino &&
999 found_key.type == BTRFS_EXTENT_DATA_KEY)
1004 leaf = path->nodes[0];
1005 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1006 ret = btrfs_next_leaf(root, path);
1011 leaf = path->nodes[0];
1017 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1019 if (found_key.objectid > inode->i_ino ||
1020 found_key.type > BTRFS_EXTENT_DATA_KEY ||
1021 found_key.offset > end)
1024 if (found_key.offset > cur_offset) {
1025 extent_end = found_key.offset;
1030 fi = btrfs_item_ptr(leaf, path->slots[0],
1031 struct btrfs_file_extent_item);
1032 extent_type = btrfs_file_extent_type(leaf, fi);
1034 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1035 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1036 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1037 extent_offset = btrfs_file_extent_offset(leaf, fi);
1038 extent_end = found_key.offset +
1039 btrfs_file_extent_num_bytes(leaf, fi);
1040 if (extent_end <= start) {
1044 if (disk_bytenr == 0)
1046 if (btrfs_file_extent_compression(leaf, fi) ||
1047 btrfs_file_extent_encryption(leaf, fi) ||
1048 btrfs_file_extent_other_encoding(leaf, fi))
1050 if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1052 if (btrfs_extent_readonly(root, disk_bytenr))
1054 if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
1056 extent_offset, disk_bytenr))
1058 disk_bytenr += extent_offset;
1059 disk_bytenr += cur_offset - found_key.offset;
1060 num_bytes = min(end + 1, extent_end) - cur_offset;
1062 * force cow if csum exists in the range.
1063 * this ensures that csums for a given extent are
1064 * either valid or do not exist.
1066 if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1069 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1070 extent_end = found_key.offset +
1071 btrfs_file_extent_inline_len(leaf, fi);
1072 extent_end = ALIGN(extent_end, root->sectorsize);
1077 if (extent_end <= start) {
1082 if (cow_start == (u64)-1)
1083 cow_start = cur_offset;
1084 cur_offset = extent_end;
1085 if (cur_offset > end)
1091 btrfs_release_path(root, path);
1092 if (cow_start != (u64)-1) {
1093 ret = cow_file_range(inode, locked_page, cow_start,
1094 found_key.offset - 1, page_started,
1097 cow_start = (u64)-1;
1100 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1101 struct extent_map *em;
1102 struct extent_map_tree *em_tree;
1103 em_tree = &BTRFS_I(inode)->extent_tree;
1104 em = alloc_extent_map(GFP_NOFS);
1105 em->start = cur_offset;
1106 em->orig_start = em->start;
1107 em->len = num_bytes;
1108 em->block_len = num_bytes;
1109 em->block_start = disk_bytenr;
1110 em->bdev = root->fs_info->fs_devices->latest_bdev;
1111 set_bit(EXTENT_FLAG_PINNED, &em->flags);
1113 write_lock(&em_tree->lock);
1114 ret = add_extent_mapping(em_tree, em);
1115 write_unlock(&em_tree->lock);
1116 if (ret != -EEXIST) {
1117 free_extent_map(em);
1120 btrfs_drop_extent_cache(inode, em->start,
1121 em->start + em->len - 1, 0);
1123 type = BTRFS_ORDERED_PREALLOC;
1125 type = BTRFS_ORDERED_NOCOW;
1128 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1129 num_bytes, num_bytes, type);
1132 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1133 cur_offset, cur_offset + num_bytes - 1,
1134 locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
1135 EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
1136 EXTENT_SET_PRIVATE2);
1137 cur_offset = extent_end;
1138 if (cur_offset > end)
1141 btrfs_release_path(root, path);
1143 if (cur_offset <= end && cow_start == (u64)-1)
1144 cow_start = cur_offset;
1145 if (cow_start != (u64)-1) {
1146 ret = cow_file_range(inode, locked_page, cow_start, end,
1147 page_started, nr_written, 1);
1151 ret = btrfs_end_transaction(trans, root);
1153 btrfs_free_path(path);
1158 * extent_io.c callback to do delayed allocation processing
1160 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1161 u64 start, u64 end, int *page_started,
1162 unsigned long *nr_written)
1165 struct btrfs_root *root = BTRFS_I(inode)->root;
1167 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
1168 ret = run_delalloc_nocow(inode, locked_page, start, end,
1169 page_started, 1, nr_written);
1170 else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
1171 ret = run_delalloc_nocow(inode, locked_page, start, end,
1172 page_started, 0, nr_written);
1173 else if (!btrfs_test_opt(root, COMPRESS))
1174 ret = cow_file_range(inode, locked_page, start, end,
1175 page_started, nr_written, 1);
1177 ret = cow_file_range_async(inode, locked_page, start, end,
1178 page_started, nr_written);
1182 static int btrfs_split_extent_hook(struct inode *inode,
1183 struct extent_state *orig, u64 split)
1185 struct btrfs_root *root = BTRFS_I(inode)->root;
1188 if (!(orig->state & EXTENT_DELALLOC))
1191 size = orig->end - orig->start + 1;
1192 if (size > root->fs_info->max_extent) {
1196 new_size = orig->end - split + 1;
1197 num_extents = div64_u64(size + root->fs_info->max_extent - 1,
1198 root->fs_info->max_extent);
1201 * if we break a large extent up then leave outstanding_extents
1202 * be, since we've already accounted for the large extent.
1204 if (div64_u64(new_size + root->fs_info->max_extent - 1,
1205 root->fs_info->max_extent) < num_extents)
1209 spin_lock(&BTRFS_I(inode)->accounting_lock);
1210 BTRFS_I(inode)->outstanding_extents++;
1211 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1217 * extent_io.c merge_extent_hook, used to track merged delayed allocation
1218 * extents so we can keep track of new extents that are just merged onto old
1219 * extents, such as when we are doing sequential writes, so we can properly
1220 * account for the metadata space we'll need.
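 *
 * (worked example: if max_extent were 128M, merging two adjacent 64M
 *  delalloc extents into a single 128M extent still fits in one
 *  max_extent-sized chunk, so one of the two outstanding_extents
 *  reservations can be dropped, which is what the code below does.)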
1222 static int btrfs_merge_extent_hook(struct inode *inode,
1223 struct extent_state *new,
1224 struct extent_state *other)
1226 struct btrfs_root *root = BTRFS_I(inode)->root;
1227 u64 new_size, old_size;
1230 /* not delalloc, ignore it */
1231 if (!(other->state & EXTENT_DELALLOC))
1234 old_size = other->end - other->start + 1;
1235 if (new->start < other->start)
1236 new_size = other->end - new->start + 1;
1238 new_size = new->end - other->start + 1;
1240 /* we're not bigger than the max, unreserve the space and go */
1241 if (new_size <= root->fs_info->max_extent) {
1242 spin_lock(&BTRFS_I(inode)->accounting_lock);
1243 BTRFS_I(inode)->outstanding_extents--;
1244 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1249 * If we grew by another max_extent, just return, we want to keep that reserved amount.
1252 num_extents = div64_u64(old_size + root->fs_info->max_extent - 1,
1253 root->fs_info->max_extent);
1254 if (div64_u64(new_size + root->fs_info->max_extent - 1,
1255 root->fs_info->max_extent) > num_extents)
1258 spin_lock(&BTRFS_I(inode)->accounting_lock);
1259 BTRFS_I(inode)->outstanding_extents--;
1260 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1266 * extent_io.c set_bit_hook, used to track delayed allocation
1267 * bytes in this file, and to maintain the list of inodes that
1268 * have pending delalloc work to be done.
1270 static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1271 unsigned long old, unsigned long bits)
1275 * set_bit and clear bit hooks normally require _irqsave/restore
1276 * but in this case, we are only testing for the DELALLOC
1277 * bit, which is only set or cleared with irqs on
1279 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1280 struct btrfs_root *root = BTRFS_I(inode)->root;
1282 spin_lock(&BTRFS_I(inode)->accounting_lock);
1283 BTRFS_I(inode)->outstanding_extents++;
1284 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1285 btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1286 spin_lock(&root->fs_info->delalloc_lock);
1287 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
1288 root->fs_info->delalloc_bytes += end - start + 1;
1289 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1290 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1291 &root->fs_info->delalloc_inodes);
1293 spin_unlock(&root->fs_info->delalloc_lock);
1299 * extent_io.c clear_bit_hook, see set_bit_hook for why
1301 static int btrfs_clear_bit_hook(struct inode *inode,
1302 struct extent_state *state, unsigned long bits)
1305 * set_bit and clear bit hooks normally require _irqsave/restore
1306 * but in this case, we are only testing for the DELALLOC
1307 * bit, which is only set or cleared with irqs on
1309 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1310 struct btrfs_root *root = BTRFS_I(inode)->root;
1312 if (bits & EXTENT_DO_ACCOUNTING) {
1313 spin_lock(&BTRFS_I(inode)->accounting_lock);
1314 BTRFS_I(inode)->outstanding_extents--;
1315 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1316 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
1319 spin_lock(&root->fs_info->delalloc_lock);
1320 if (state->end - state->start + 1 >
1321 root->fs_info->delalloc_bytes) {
1322 printk(KERN_INFO "btrfs warning: delalloc account "
1324 (unsigned long long)
1325 state->end - state->start + 1,
1326 (unsigned long long)
1327 root->fs_info->delalloc_bytes);
1328 btrfs_delalloc_free_space(root, inode, (u64)-1);
1329 root->fs_info->delalloc_bytes = 0;
1330 BTRFS_I(inode)->delalloc_bytes = 0;
1332 btrfs_delalloc_free_space(root, inode,
1335 root->fs_info->delalloc_bytes -= state->end -
1337 BTRFS_I(inode)->delalloc_bytes -= state->end -
1340 if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1341 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1342 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1344 spin_unlock(&root->fs_info->delalloc_lock);
1350 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1351 * we don't create bios that span stripes or chunks
1353 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1354 size_t size, struct bio *bio,
1355 unsigned long bio_flags)
1357 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1358 struct btrfs_mapping_tree *map_tree;
1359 u64 logical = (u64)bio->bi_sector << 9;
1364 if (bio_flags & EXTENT_BIO_COMPRESSED)
1367 length = bio->bi_size;
1368 map_tree = &root->fs_info->mapping_tree;
1369 map_length = length;
1370 ret = btrfs_map_block(map_tree, READ, logical,
1371 &map_length, NULL, 0);
1373 if (map_length < length + size)
1379 * in order to insert checksums into the metadata in large chunks,
1380 * we wait until bio submission time. All the pages in the bio are
1381 * checksummed and sums are attached onto the ordered extent record.
1383 * At IO completion time the csums attached to the ordered extent record
1384 * are inserted into the btree
1386 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1387 struct bio *bio, int mirror_num,
1388 unsigned long bio_flags)
1390 struct btrfs_root *root = BTRFS_I(inode)->root;
1393 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1399 * in order to insert checksums into the metadata in large chunks,
1400 * we wait until bio submission time. All the pages in the bio are
1401 * checksummed and sums are attached onto the ordered extent record.
1403 * At IO completion time the csums attached to the ordered extent record
1404 * are inserted into the btree
1406 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1407 int mirror_num, unsigned long bio_flags)
1409 struct btrfs_root *root = BTRFS_I(inode)->root;
1410 return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1414 * extent_io.c submission hook. This does the right thing for csum calculation
1415 * on write, or reading the csums from the tree before a read
1417 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1418 int mirror_num, unsigned long bio_flags)
1420 struct btrfs_root *root = BTRFS_I(inode)->root;
1424 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1426 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1429 if (!(rw & (1 << BIO_RW))) {
1430 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1431 return btrfs_submit_compressed_read(inode, bio,
1432 mirror_num, bio_flags);
1433 } else if (!skip_sum)
1434 btrfs_lookup_bio_sums(root, inode, bio, NULL);
1436 } else if (!skip_sum) {
1437 /* csum items have already been cloned */
1438 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1440 /* we're doing a write, do the async checksumming */
1441 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1442 inode, rw, bio, mirror_num,
1443 bio_flags, __btrfs_submit_bio_start,
1444 __btrfs_submit_bio_done);
1448 return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1452 * given a list of ordered sums, record them in the inode. This happens
1453 * at IO completion time based on sums calculated at bio submission time.
1455 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1456 struct inode *inode, u64 file_offset,
1457 struct list_head *list)
1459 struct btrfs_ordered_sum *sum;
1461 btrfs_set_trans_block_group(trans, inode);
1463 list_for_each_entry(sum, list, list) {
1464 btrfs_csum_file_blocks(trans,
1465 BTRFS_I(inode)->root->fs_info->csum_root, sum);
1470 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
1472 if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1474 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1478 /* see btrfs_writepage_start_hook for details on why this is required */
1479 struct btrfs_writepage_fixup {
1481 struct btrfs_work work;
1484 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1486 struct btrfs_writepage_fixup *fixup;
1487 struct btrfs_ordered_extent *ordered;
1489 struct inode *inode;
1493 fixup = container_of(work, struct btrfs_writepage_fixup, work);
1497 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1498 ClearPageChecked(page);
1502 inode = page->mapping->host;
1503 page_start = page_offset(page);
1504 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1506 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1508 /* already ordered? We're done */
1509 if (PagePrivate2(page))
1512 ordered = btrfs_lookup_ordered_extent(inode, page_start);
1514 unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
1515 page_end, GFP_NOFS);
1517 btrfs_start_ordered_extent(inode, ordered, 1);
1521 btrfs_set_extent_delalloc(inode, page_start, page_end);
1522 ClearPageChecked(page);
1524 unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1527 page_cache_release(page);
1531 * There are a few paths in the higher layers of the kernel that directly
1532 * set the page dirty bit without asking the filesystem if it is a
1533 * good idea. This causes problems because we want to make sure COW
1534 * properly happens and the data=ordered rules are followed.
1536 * In our case any range that doesn't have the ORDERED bit set
1537 * hasn't been properly set up for IO. We kick off an async process
1538 * to fix it up. The async helper will wait for ordered extents, set
1539 * the delalloc bit and make it safe to write the page.
1541 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1543 struct inode *inode = page->mapping->host;
1544 struct btrfs_writepage_fixup *fixup;
1545 struct btrfs_root *root = BTRFS_I(inode)->root;
1547 /* this page is properly in the ordered list */
1548 if (TestClearPagePrivate2(page))
1551 if (PageChecked(page))
1554 fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1558 SetPageChecked(page);
1559 page_cache_get(page);
1560 fixup->work.func = btrfs_writepage_fixup_worker;
1562 btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1566 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1567 struct inode *inode, u64 file_pos,
1568 u64 disk_bytenr, u64 disk_num_bytes,
1569 u64 num_bytes, u64 ram_bytes,
1571 u8 compression, u8 encryption,
1572 u16 other_encoding, int extent_type)
1574 struct btrfs_root *root = BTRFS_I(inode)->root;
1575 struct btrfs_file_extent_item *fi;
1576 struct btrfs_path *path;
1577 struct extent_buffer *leaf;
1578 struct btrfs_key ins;
1582 path = btrfs_alloc_path();
1585 path->leave_spinning = 1;
1588 * we may be replacing one extent in the tree with another.
1589 * The new extent is pinned in the extent map, and we don't want
1590 * to drop it from the cache until it is completely in the btree.
1592 * So, tell btrfs_drop_extents to leave this extent in the cache.
1593 * The caller is expected to unpin it and allow it to be merged with the others.
1596 ret = btrfs_drop_extents(trans, root, inode, file_pos,
1597 file_pos + num_bytes, locked_end,
1598 file_pos, &hint, 0);
1601 ins.objectid = inode->i_ino;
1602 ins.offset = file_pos;
1603 ins.type = BTRFS_EXTENT_DATA_KEY;
1604 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1606 leaf = path->nodes[0];
1607 fi = btrfs_item_ptr(leaf, path->slots[0],
1608 struct btrfs_file_extent_item);
1609 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1610 btrfs_set_file_extent_type(leaf, fi, extent_type);
1611 btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1612 btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1613 btrfs_set_file_extent_offset(leaf, fi, 0);
1614 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1615 btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1616 btrfs_set_file_extent_compression(leaf, fi, compression);
1617 btrfs_set_file_extent_encryption(leaf, fi, encryption);
1618 btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1620 btrfs_unlock_up_safe(path, 1);
1621 btrfs_set_lock_blocking(leaf);
1623 btrfs_mark_buffer_dirty(leaf);
1625 inode_add_bytes(inode, num_bytes);
1627 ins.objectid = disk_bytenr;
1628 ins.offset = disk_num_bytes;
1629 ins.type = BTRFS_EXTENT_ITEM_KEY;
1630 ret = btrfs_alloc_reserved_file_extent(trans, root,
1631 root->root_key.objectid,
1632 inode->i_ino, file_pos, &ins);
1634 btrfs_free_path(path);
1640 * helper function for btrfs_finish_ordered_io, this
1641 * just reads in some of the csum leaves to prime them into ram
1642 * before we start the transaction. It limits the amount of btree
1643 * reads required while inside the transaction.
1645 static noinline void reada_csum(struct btrfs_root *root,
1646 struct btrfs_path *path,
1647 struct btrfs_ordered_extent *ordered_extent)
1649 struct btrfs_ordered_sum *sum;
1652 sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
1654 bytenr = sum->sums[0].bytenr;
1657 * we don't care about the results, the point of this search is
1658 * just to get the btree leaves into ram
1660 btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
1663 /* as ordered data IO finishes, this gets called so we can finish
1664 * an ordered extent if the range of bytes in the file it covers has been fully written.
1667 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1669 struct btrfs_root *root = BTRFS_I(inode)->root;
1670 struct btrfs_trans_handle *trans;
1671 struct btrfs_ordered_extent *ordered_extent = NULL;
1672 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1673 struct btrfs_path *path;
1677 ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
1682 * before we join the transaction, try to do some of our IO.
1683 * This will limit the amount of IO that we have to do with
1684 * the transaction running. We're unlikely to need to do any
1685 * IO if the file extents are new; the disk_i_size check
1686 * covers the most common case.
1688 if (start < BTRFS_I(inode)->disk_i_size) {
1689 path = btrfs_alloc_path();
1691 ret = btrfs_lookup_file_extent(NULL, root, path,
1694 ordered_extent = btrfs_lookup_ordered_extent(inode,
1696 if (!list_empty(&ordered_extent->list)) {
1697 btrfs_release_path(root, path);
1698 reada_csum(root, path, ordered_extent);
1700 btrfs_free_path(path);
1704 trans = btrfs_join_transaction(root, 1);
1706 if (!ordered_extent)
1707 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1708 BUG_ON(!ordered_extent);
1709 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
1712 lock_extent(io_tree, ordered_extent->file_offset,
1713 ordered_extent->file_offset + ordered_extent->len - 1,
1716 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1718 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1720 ret = btrfs_mark_extent_written(trans, root, inode,
1721 ordered_extent->file_offset,
1722 ordered_extent->file_offset +
1723 ordered_extent->len);
1726 ret = insert_reserved_file_extent(trans, inode,
1727 ordered_extent->file_offset,
1728 ordered_extent->start,
1729 ordered_extent->disk_len,
1730 ordered_extent->len,
1731 ordered_extent->len,
1732 ordered_extent->file_offset +
1733 ordered_extent->len,
1735 BTRFS_FILE_EXTENT_REG);
1736 unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1737 ordered_extent->file_offset,
1738 ordered_extent->len);
1741 unlock_extent(io_tree, ordered_extent->file_offset,
1742 ordered_extent->file_offset + ordered_extent->len - 1,
1745 add_pending_csums(trans, inode, ordered_extent->file_offset,
1746 &ordered_extent->list);
1748 mutex_lock(&BTRFS_I(inode)->extent_mutex);
1749 btrfs_ordered_update_i_size(inode, ordered_extent);
1750 btrfs_update_inode(trans, root, inode);
1751 btrfs_remove_ordered_extent(inode, ordered_extent);
1752 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
1755 btrfs_put_ordered_extent(ordered_extent);
1756 /* once for the tree */
1757 btrfs_put_ordered_extent(ordered_extent);
1759 btrfs_end_transaction(trans, root);
1763 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1764 struct extent_state *state, int uptodate)
1766 ClearPagePrivate2(page);
1767 return btrfs_finish_ordered_io(page->mapping->host, start, end);
1771 * When IO fails, either with EIO or because csum verification fails, we
1772 * try other mirrors that might have a good copy of the data. This
1773 * io_failure_record is used to record state as we go through all the
1774 * mirrors. If another mirror has good data, the page is set up to date
1775 * and things continue. If a good mirror can't be found, the original
1776 * bio end_io callback is called to indicate things have failed.
1778 struct io_failure_record {
1783 unsigned long bio_flags;
1787 static int btrfs_io_failed_hook(struct bio *failed_bio,
1788 struct page *page, u64 start, u64 end,
1789 struct extent_state *state)
1791 struct io_failure_record *failrec = NULL;
1793 struct extent_map *em;
1794 struct inode *inode = page->mapping->host;
1795 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1796 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1803 ret = get_state_private(failure_tree, start, &private);
1805 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1808 failrec->start = start;
1809 failrec->len = end - start + 1;
1810 failrec->last_mirror = 0;
1811 failrec->bio_flags = 0;
1813 read_lock(&em_tree->lock);
1814 em = lookup_extent_mapping(em_tree, start, failrec->len);
1815 if (em->start > start || em->start + em->len < start) {
1816 free_extent_map(em);
1819 read_unlock(&em_tree->lock);
1821 if (!em || IS_ERR(em)) {
1825 logical = start - em->start;
1826 logical = em->block_start + logical;
1827 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1828 logical = em->block_start;
1829 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1831 failrec->logical = logical;
1832 free_extent_map(em);
1833 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1834 EXTENT_DIRTY, GFP_NOFS);
1835 set_state_private(failure_tree, start,
1836 (u64)(unsigned long)failrec);
1838 failrec = (struct io_failure_record *)(unsigned long)private;
1840 num_copies = btrfs_num_copies(
1841 &BTRFS_I(inode)->root->fs_info->mapping_tree,
1842 failrec->logical, failrec->len);
1843 failrec->last_mirror++;
1845 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1846 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1849 if (state && state->start != failrec->start)
1851 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1853 if (!state || failrec->last_mirror > num_copies) {
1854 set_state_private(failure_tree, failrec->start, 0);
1855 clear_extent_bits(failure_tree, failrec->start,
1856 failrec->start + failrec->len - 1,
1857 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1861 bio = bio_alloc(GFP_NOFS, 1);
1862 bio->bi_private = state;
1863 bio->bi_end_io = failed_bio->bi_end_io;
1864 bio->bi_sector = failrec->logical >> 9;
1865 bio->bi_bdev = failed_bio->bi_bdev;
1868 bio_add_page(bio, page, failrec->len, start - page_offset(page));
1869 if (failed_bio->bi_rw & (1 << BIO_RW))
1874 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1875 failrec->last_mirror,
1876 failrec->bio_flags);
1881 * each time an IO finishes, we do a fast check in the IO failure tree
1882 * to see if we need to process or clean up an io_failure_record
1884 static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1887 u64 private_failure;
1888 struct io_failure_record *failure;
1892 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1893 (u64)-1, 1, EXTENT_DIRTY)) {
1894 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1895 start, &private_failure);
1897 failure = (struct io_failure_record *)(unsigned long)
1899 set_state_private(&BTRFS_I(inode)->io_failure_tree,
1901 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1903 failure->start + failure->len - 1,
1904 EXTENT_DIRTY | EXTENT_LOCKED,
1913 * when reads are done, we need to check csums to verify the data is correct.
1914 * if there's a match, we allow the bio to finish. If not, we go through
1915 * the io_failure_record routines to find good copies
1917 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1918 struct extent_state *state)
1920 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1921 struct inode *inode = page->mapping->host;
1922 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1924 u64 private = ~(u32)0;
1926 struct btrfs_root *root = BTRFS_I(inode)->root;
1929 if (PageChecked(page)) {
1930 ClearPageChecked(page);
1934 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1937 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1938 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
1939 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1944 if (state && state->start == start) {
1945 private = state->private;
1948 ret = get_state_private(io_tree, start, &private);
1950 kaddr = kmap_atomic(page, KM_USER0);
1954 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
1955 btrfs_csum_final(csum, (char *)&csum);
1956 if (csum != private)
1959 kunmap_atomic(kaddr, KM_USER0);
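	/*
	 * note: "private" above is the expected checksum for this block,
	 * stashed in the io_tree when the read bio was submitted (btrfs
	 * data checksums are crc32c at this point in its history).
	 */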
1961 /* if the io failure tree for this inode is non-empty,
1962 * check to see if we've recovered from a failed IO
1964 btrfs_clean_io_failures(inode, start);
1968 if (printk_ratelimit()) {
1969 printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1970 "private %llu\n", page->mapping->host->i_ino,
1971 (unsigned long long)start, csum,
1972 (unsigned long long)private);
1974 memset(kaddr + offset, 1, end - start + 1);
1975 flush_dcache_page(page);
1976 kunmap_atomic(kaddr, KM_USER0);
1983 * This creates an orphan entry for the given inode in case something goes
1984 * wrong in the middle of an unlink/truncate.
1986 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
1988 struct btrfs_root *root = BTRFS_I(inode)->root;
1991 spin_lock(&root->list_lock);
1993 /* already on the orphan list, we're good */
1994 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
1995 spin_unlock(&root->list_lock);
1999 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2001 spin_unlock(&root->list_lock);
2004 * insert an orphan item to track this unlinked/truncated file
2006 ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
2012 * We have done the truncate/delete so we can go ahead and remove the orphan
2013 * item for this particular inode.
2015 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2017 struct btrfs_root *root = BTRFS_I(inode)->root;
2020 spin_lock(&root->list_lock);
2022 if (list_empty(&BTRFS_I(inode)->i_orphan)) {
2023 spin_unlock(&root->list_lock);
2027 list_del_init(&BTRFS_I(inode)->i_orphan);
2029 spin_unlock(&root->list_lock);
2033 spin_unlock(&root->list_lock);
2035 ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
2041 * this cleans up any orphans that may be left on the list from the last use of this filesystem.
2044 void btrfs_orphan_cleanup(struct btrfs_root *root)
2046 struct btrfs_path *path;
2047 struct extent_buffer *leaf;
2048 struct btrfs_item *item;
2049 struct btrfs_key key, found_key;
2050 struct btrfs_trans_handle *trans;
2051 struct inode *inode;
2052 int ret = 0, nr_unlink = 0, nr_truncate = 0;
2054 path = btrfs_alloc_path();
2059 key.objectid = BTRFS_ORPHAN_OBJECTID;
2060 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2061 key.offset = (u64)-1;
2065 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2067 printk(KERN_ERR "Error searching slot for orphan: %d"
2073 * if ret == 0 it means we found what we were searching for, which
2074 * is weird, but possible, so only screw with the path if we didn't
2075 * find the key and see if we have stuff that matches
2078 if (path->slots[0] == 0)
2083 /* pull out the item */
2084 leaf = path->nodes[0];
2085 item = btrfs_item_nr(leaf, path->slots[0]);
2086 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2088 /* make sure the item matches what we want */
2089 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2091 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2094 /* release the path since we're done with it */
2095 btrfs_release_path(root, path);
2098 * this is where we are basically btrfs_lookup, without the
2099 * crossing root thing. we store the inode number in the
2100 * offset of the orphan item.
2102 found_key.objectid = found_key.offset;
2103 found_key.type = BTRFS_INODE_ITEM_KEY;
2104 found_key.offset = 0;
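		/*
		 * i.e. the orphan item keyed (BTRFS_ORPHAN_OBJECTID,
		 * ORPHAN_ITEM_KEY, ino) is turned into the inode key
		 * (ino, INODE_ITEM_KEY, 0) that btrfs_iget expects.
		 */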
2105 inode = btrfs_iget(root->fs_info->sb, &found_key, root);
2110 * add this inode to the orphan list so btrfs_orphan_del does
2111 * the proper thing when we hit it
2113 spin_lock(&root->list_lock);
2114 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2115 spin_unlock(&root->list_lock);
2118 * if this is a bad inode, it means we actually succeeded in
2119 * removing the inode, but not the orphan record, which means
2120 * we need to manually delete the orphan since iput will just
2121 * do a destroy_inode
2123 if (is_bad_inode(inode)) {
2124 trans = btrfs_start_transaction(root, 1);
2125 btrfs_orphan_del(trans, inode);
2126 btrfs_end_transaction(trans, root);
2131 /* if we have links, this was a truncate, let's do that */
2132 if (inode->i_nlink) {
2134 btrfs_truncate(inode);
2139 /* this will do delete_inode and everything for us */
2144 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2146 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2148 btrfs_free_path(path);
2152 * very simple check to peek ahead in the leaf looking for xattrs. If we
2153 * don't find any xattrs, we know there can't be any acls.
2155 * slot is the slot the inode is in, objectid is the objectid of the inode
2157 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2158 int slot, u64 objectid)
2160 u32 nritems = btrfs_header_nritems(leaf);
2161 struct btrfs_key found_key;
2165 while (slot < nritems) {
2166 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2168 /* we found a different objectid, there must not be acls */
2169 if (found_key.objectid != objectid)
2172 /* we found an xattr, assume we've got an acl */
2173 if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2177 * we found a key greater than an xattr key, there can't
2178 * be any acls later on
2180 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2187 * it goes inode, inode backrefs, xattrs, extents,
2188 * so if there are a ton of hard links to an inode there can
2189 * be a lot of backrefs. Don't waste time searching too hard,
2190 * this is just an optimization
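 *
 * (for reference: the key type values order these groups as
 *  INODE_ITEM < INODE_REF < XATTR_ITEM < EXTENT_DATA, which is what
 *  makes the early-exit checks above safe)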
2195 /* we hit the end of the leaf before we found an xattr or
2196 * something larger than an xattr. We have to assume the inode has acls.
2203 * read an inode from the btree into the in-memory inode
2205 static void btrfs_read_locked_inode(struct inode *inode)
2207 struct btrfs_path *path;
2208 struct extent_buffer *leaf;
2209 struct btrfs_inode_item *inode_item;
2210 struct btrfs_timespec *tspec;
2211 struct btrfs_root *root = BTRFS_I(inode)->root;
2212 struct btrfs_key location;
2214 u64 alloc_group_block;
2218 path = btrfs_alloc_path();
2220 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2222 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2226 leaf = path->nodes[0];
2227 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2228 struct btrfs_inode_item);
2230 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2231 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2232 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2233 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2234 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2236 tspec = btrfs_inode_atime(inode_item);
2237 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2238 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2240 tspec = btrfs_inode_mtime(inode_item);
2241 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2242 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2244 tspec = btrfs_inode_ctime(inode_item);
2245 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2246 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2248 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2249 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2250 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2251 inode->i_generation = BTRFS_I(inode)->generation;
2253 rdev = btrfs_inode_rdev(leaf, inode_item);
2255 BTRFS_I(inode)->index_cnt = (u64)-1;
2256 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2258 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2261 * try to precache a NULL acl entry for files that don't have
2262 * any xattrs or acls
2264 maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2266 cache_no_acl(inode);
2268 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2269 alloc_group_block, 0);
2270 btrfs_free_path(path);
2273 switch (inode->i_mode & S_IFMT) {
2275 inode->i_mapping->a_ops = &btrfs_aops;
2276 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2277 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2278 inode->i_fop = &btrfs_file_operations;
2279 inode->i_op = &btrfs_file_inode_operations;
2282 inode->i_fop = &btrfs_dir_file_operations;
2283 if (root == root->fs_info->tree_root)
2284 inode->i_op = &btrfs_dir_ro_inode_operations;
2286 inode->i_op = &btrfs_dir_inode_operations;
2289 inode->i_op = &btrfs_symlink_inode_operations;
2290 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2291 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2294 inode->i_op = &btrfs_special_inode_operations;
2295 init_special_inode(inode, inode->i_mode, rdev);
2299 btrfs_update_iflags(inode);
2303 btrfs_free_path(path);
2304 make_bad_inode(inode);
2308 * given a leaf and an inode, copy the inode fields into the leaf
2310 static void fill_inode_item(struct btrfs_trans_handle *trans,
2311 struct extent_buffer *leaf,
2312 struct btrfs_inode_item *item,
2313 struct inode *inode)
2315 btrfs_set_inode_uid(leaf, item, inode->i_uid);
2316 btrfs_set_inode_gid(leaf, item, inode->i_gid);
2317 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2318 btrfs_set_inode_mode(leaf, item, inode->i_mode);
2319 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2321 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2322 inode->i_atime.tv_sec);
2323 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2324 inode->i_atime.tv_nsec);
2326 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2327 inode->i_mtime.tv_sec);
2328 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2329 inode->i_mtime.tv_nsec);
2331 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2332 inode->i_ctime.tv_sec);
2333 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2334 inode->i_ctime.tv_nsec);
2336 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2337 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2338 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2339 btrfs_set_inode_transid(leaf, item, trans->transid);
2340 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2341 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2342 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2346 * copy everything in the in-memory inode into the btree.
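/*
 * Illustrative usage sketch (annotation, not original code): callers must
 * hold a transaction handle; the common pattern in this file is
 *
 *     trans = btrfs_join_transaction(root, 1);
 *     btrfs_update_inode(trans, root, inode);
 *     btrfs_end_transaction(trans, root);
 *
 * as btrfs_dirty_inode() does further down.
 */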
2348 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2349 struct btrfs_root *root, struct inode *inode)
2351 struct btrfs_inode_item *inode_item;
2352 struct btrfs_path *path;
2353 struct extent_buffer *leaf;
2356 path = btrfs_alloc_path();
2358 path->leave_spinning = 1;
2359 ret = btrfs_lookup_inode(trans, root, path,
2360 &BTRFS_I(inode)->location, 1);
2367 btrfs_unlock_up_safe(path, 1);
2368 leaf = path->nodes[0];
2369 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2370 struct btrfs_inode_item);
2372 fill_inode_item(trans, leaf, inode_item, inode);
2373 btrfs_mark_buffer_dirty(leaf);
2374 btrfs_set_inode_last_trans(trans, inode);
2377 btrfs_free_path(path);
2383 * unlink helper that gets used here in inode.c and in the tree logging
2384 * recovery code. It removes a link in a directory with a given name, and
2385 * also drops the back refs in the inode to the directory
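/*
 * Illustrative note (annotation, not original code): btrfs_unlink() and
 * btrfs_rmdir() below both end up here with
 *
 *     ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
 *                              dentry->d_name.name, dentry->d_name.len);
 *
 * which removes the dir item, the dir index item, the inode backref and
 * the matching tree-log entries in a single pass.
 */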
2387 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2388 struct btrfs_root *root,
2389 struct inode *dir, struct inode *inode,
2390 const char *name, int name_len)
2392 struct btrfs_path *path;
2394 struct extent_buffer *leaf;
2395 struct btrfs_dir_item *di;
2396 struct btrfs_key key;
2399 path = btrfs_alloc_path();
2405 path->leave_spinning = 1;
2406 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2407 name, name_len, -1);
2416 leaf = path->nodes[0];
2417 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2418 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2421 btrfs_release_path(root, path);
2423 ret = btrfs_del_inode_ref(trans, root, name, name_len,
2425 dir->i_ino, &index);
2427 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2428 "inode %lu parent %lu\n", name_len, name,
2429 inode->i_ino, dir->i_ino);
2433 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2434 index, name, name_len, -1);
2443 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2444 btrfs_release_path(root, path);
2446 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2448 BUG_ON(ret != 0 && ret != -ENOENT);
2450 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2454 btrfs_free_path(path);
2458 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2459 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2460 btrfs_update_inode(trans, root, dir);
2461 btrfs_drop_nlink(inode);
2462 ret = btrfs_update_inode(trans, root, inode);
2467 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2469 struct btrfs_root *root;
2470 struct btrfs_trans_handle *trans;
2471 struct inode *inode = dentry->d_inode;
2473 unsigned long nr = 0;
2475 root = BTRFS_I(dir)->root;
2477 trans = btrfs_start_transaction(root, 1);
2479 btrfs_set_trans_block_group(trans, dir);
2481 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2483 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2484 dentry->d_name.name, dentry->d_name.len);
2486 if (inode->i_nlink == 0)
2487 ret = btrfs_orphan_add(trans, inode);
2489 nr = trans->blocks_used;
2491 btrfs_end_transaction_throttle(trans, root);
2492 btrfs_btree_balance_dirty(root, nr);
2496 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2497 struct btrfs_root *root,
2498 struct inode *dir, u64 objectid,
2499 const char *name, int name_len)
2501 struct btrfs_path *path;
2502 struct extent_buffer *leaf;
2503 struct btrfs_dir_item *di;
2504 struct btrfs_key key;
2508 path = btrfs_alloc_path();
2512 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2513 name, name_len, -1);
2514 BUG_ON(!di || IS_ERR(di));
2516 leaf = path->nodes[0];
2517 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2518 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2519 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2521 btrfs_release_path(root, path);
2523 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
2524 objectid, root->root_key.objectid,
2525 dir->i_ino, &index, name, name_len);
2527 BUG_ON(ret != -ENOENT);
2528 di = btrfs_search_dir_index_item(root, path, dir->i_ino,
2530 BUG_ON(!di || IS_ERR(di));
2532 leaf = path->nodes[0];
2533 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2534 btrfs_release_path(root, path);
2538 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2539 index, name, name_len, -1);
2540 BUG_ON(!di || IS_ERR(di));
2542 leaf = path->nodes[0];
2543 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2544 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2545 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2547 btrfs_release_path(root, path);
2549 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2550 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2551 ret = btrfs_update_inode(trans, root, dir);
2553 dir->i_sb->s_dirt = 1;
2555 btrfs_free_path(path);
2559 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2561 struct inode *inode = dentry->d_inode;
2564 struct btrfs_root *root = BTRFS_I(dir)->root;
2565 struct btrfs_trans_handle *trans;
2566 unsigned long nr = 0;
2568 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2569 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
2572 trans = btrfs_start_transaction(root, 1);
2573 btrfs_set_trans_block_group(trans, dir);
2575 if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
2576 err = btrfs_unlink_subvol(trans, root, dir,
2577 BTRFS_I(inode)->location.objectid,
2578 dentry->d_name.name,
2579 dentry->d_name.len);
2583 err = btrfs_orphan_add(trans, inode);
2587 /* now the directory is empty */
2588 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2589 dentry->d_name.name, dentry->d_name.len);
2591 btrfs_i_size_write(inode, 0);
2593 nr = trans->blocks_used;
2594 ret = btrfs_end_transaction_throttle(trans, root);
2595 btrfs_btree_balance_dirty(root, nr);
2604 * when truncating bytes in a file, it is possible to avoid reading
2605 * the leaves that contain only checksum items. This can be the
2606 * majority of the IO required to delete a large file, but it must
2607 * be done carefully.
2609 * The keys in the level just above the leaves are checked to make sure
2610 * the lowest key in a given leaf is a csum key, and starts at an offset
2611 * after the new size.
2613 * Then the key for the next leaf is checked to make sure it also has
2614 * a checksum item for the same file. If it does, we know our target leaf
2615 * contains only checksum items, and it can be safely freed without reading it
2618 * This is just an optimization targeted at large files. It may do
2619 * nothing. It will return 0 unless things went badly.
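/*
 * Illustrative sketch (annotation, not original code): with new_size at
 * file offset N, the level-1 node above the leaves might hold
 *
 *     key[i]   = (ino, BTRFS_CSUM_ITEM_KEY, N')   with N' >= N
 *     key[i+1] = (ino, BTRFS_CSUM_ITEM_KEY, N'')
 *
 * only when both keys are csum keys for this inode can the leaf behind
 * key[i] be freed without reading it; anything else and we fall back to
 * the normal item-by-item truncate.
 */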
2621 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2622 struct btrfs_root *root,
2623 struct btrfs_path *path,
2624 struct inode *inode, u64 new_size)
2626 struct btrfs_key key;
2629 struct btrfs_key found_key;
2630 struct btrfs_key other_key;
2631 struct btrfs_leaf_ref *ref;
2635 path->lowest_level = 1;
2636 key.objectid = inode->i_ino;
2637 key.type = BTRFS_CSUM_ITEM_KEY;
2638 key.offset = new_size;
2640 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2644 if (path->nodes[1] == NULL) {
2649 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2650 nritems = btrfs_header_nritems(path->nodes[1]);
2655 if (path->slots[1] >= nritems)
2658 /* did we find a key greater than anything we want to delete? */
2659 if (found_key.objectid > inode->i_ino ||
2660 (found_key.objectid == inode->i_ino && found_key.type > key.type))
2663 /* we check the next key in the node to make sure the leaf contains
2664 * only checksum items. This comparison doesn't work if our
2665 * leaf is the last one in the node
2667 if (path->slots[1] + 1 >= nritems) {
2669 /* search forward from the last key in the node, this
2670 * will bring us into the next node in the tree
2672 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2674 /* unlikely, but we inc below, so check to be safe */
2675 if (found_key.offset == (u64)-1)
2678 /* search_forward needs a path with locks held, do the
2679 * search again for the original key. It is possible
2680 * this will race with a balance and return a path that
2681 * we could modify, but this drop is just an optimization
2682 * and is allowed to miss some leaves.
2684 btrfs_release_path(root, path);
2687 /* setup a max key for search_forward */
2688 other_key.offset = (u64)-1;
2689 other_key.type = key.type;
2690 other_key.objectid = key.objectid;
2692 path->keep_locks = 1;
2693 ret = btrfs_search_forward(root, &found_key, &other_key,
2695 path->keep_locks = 0;
2696 if (ret || found_key.objectid != key.objectid ||
2697 found_key.type != key.type) {
2702 key.offset = found_key.offset;
2703 btrfs_release_path(root, path);
2708 /* we know there's one more slot after us in the tree,
2709 * read that key so we can verify it is also a checksum item
2711 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2713 if (found_key.objectid < inode->i_ino)
2716 if (found_key.type != key.type || found_key.offset < new_size)
2720 * if the key for the next leaf isn't a csum key from this objectid,
2721 * we can't be sure there aren't good items inside this leaf.
2724 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2727 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2728 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2730 * it is safe to delete this leaf, it contains only
2731 * csum items from this inode at an offset >= new_size
2733 ret = btrfs_del_leaf(trans, root, path, leaf_start);
2736 if (root->ref_cows && leaf_gen < trans->transid) {
2737 ref = btrfs_alloc_leaf_ref(root, 0);
2739 ref->root_gen = root->root_key.offset;
2740 ref->bytenr = leaf_start;
2742 ref->generation = leaf_gen;
2745 btrfs_sort_leaf_ref(ref);
2747 ret = btrfs_add_leaf_ref(root, ref, 0);
2749 btrfs_free_leaf_ref(root, ref);
2755 btrfs_release_path(root, path);
2757 if (other_key.objectid == inode->i_ino &&
2758 other_key.type == key.type && other_key.offset > key.offset) {
2759 key.offset = other_key.offset;
2765 /* fixup any changes we've made to the path */
2766 path->lowest_level = 0;
2767 path->keep_locks = 0;
2768 btrfs_release_path(root, path);
2775 * this can truncate away extent items, csum items and directory items.
2776 * It starts at a high offset and removes keys until it can't find
2777 * any higher than new_size
2779 * csum items that cross the new i_size are truncated to the new size
2782 * min_type is the minimum key type to truncate down to. If set to 0, this
2783 * will kill all the items on this inode, including the INODE_ITEM_KEY.
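/*
 * Illustrative caller sketch (annotation, not original code), mirroring
 * what btrfs_delete_inode() does further down:
 *
 *     btrfs_i_size_write(inode, 0);
 *     trans = btrfs_join_transaction(root, 1);
 *     ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
 *     btrfs_end_transaction(trans, root);
 *
 * min_type == 0 removes every item belonging to the inode, including the
 * inode item itself.
 */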
2785 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2786 struct btrfs_root *root,
2787 struct inode *inode,
2788 u64 new_size, u32 min_type)
2791 struct btrfs_path *path;
2792 struct btrfs_key key;
2793 struct btrfs_key found_key;
2794 u32 found_type = (u8)-1;
2795 struct extent_buffer *leaf;
2796 struct btrfs_file_extent_item *fi;
2797 u64 extent_start = 0;
2798 u64 extent_num_bytes = 0;
2799 u64 extent_offset = 0;
2803 int pending_del_nr = 0;
2804 int pending_del_slot = 0;
2805 int extent_type = -1;
2807 u64 mask = root->sectorsize - 1;
2810 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2811 path = btrfs_alloc_path();
2815 /* FIXME, add redo link to tree so we don't leak on crash */
2816 key.objectid = inode->i_ino;
2817 key.offset = (u64)-1;
2821 path->leave_spinning = 1;
2822 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2827 /* there are no items in the tree for us to truncate, we're done */
2830 if (path->slots[0] == 0) {
2839 leaf = path->nodes[0];
2840 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2841 found_type = btrfs_key_type(&found_key);
2844 if (found_key.objectid != inode->i_ino)
2847 if (found_type < min_type)
2850 item_end = found_key.offset;
2851 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2852 fi = btrfs_item_ptr(leaf, path->slots[0],
2853 struct btrfs_file_extent_item);
2854 extent_type = btrfs_file_extent_type(leaf, fi);
2855 encoding = btrfs_file_extent_compression(leaf, fi);
2856 encoding |= btrfs_file_extent_encryption(leaf, fi);
2857 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2859 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2861 btrfs_file_extent_num_bytes(leaf, fi);
2862 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2863 item_end += btrfs_file_extent_inline_len(leaf,
2868 if (item_end < new_size) {
2869 if (found_type == BTRFS_DIR_ITEM_KEY)
2870 found_type = BTRFS_INODE_ITEM_KEY;
2871 else if (found_type == BTRFS_EXTENT_ITEM_KEY)
2872 found_type = BTRFS_EXTENT_DATA_KEY;
2873 else if (found_type == BTRFS_EXTENT_DATA_KEY)
2874 found_type = BTRFS_XATTR_ITEM_KEY;
2875 else if (found_type == BTRFS_XATTR_ITEM_KEY)
2876 found_type = BTRFS_INODE_REF_KEY;
2877 else if (found_type)
2881 btrfs_set_key_type(&key, found_type);
2884 if (found_key.offset >= new_size)
2890 /* FIXME, shrink the extent if the ref count is only 1 */
2891 if (found_type != BTRFS_EXTENT_DATA_KEY)
2894 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2896 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2897 if (!del_item && !encoding) {
2898 u64 orig_num_bytes =
2899 btrfs_file_extent_num_bytes(leaf, fi);
2900 extent_num_bytes = new_size -
2901 found_key.offset + root->sectorsize - 1;
2902 extent_num_bytes = extent_num_bytes &
2903 ~((u64)root->sectorsize - 1);
2904 btrfs_set_file_extent_num_bytes(leaf, fi,
2906 num_dec = (orig_num_bytes -
2908 if (root->ref_cows && extent_start != 0)
2909 inode_sub_bytes(inode, num_dec);
2910 btrfs_mark_buffer_dirty(leaf);
2913 btrfs_file_extent_disk_num_bytes(leaf,
2915 extent_offset = found_key.offset -
2916 btrfs_file_extent_offset(leaf, fi);
2918 /* FIXME blocksize != 4096 */
2919 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2920 if (extent_start != 0) {
2923 inode_sub_bytes(inode, num_dec);
2926 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2928 * we can't truncate inline items that have had special encodings
2932 btrfs_file_extent_compression(leaf, fi) == 0 &&
2933 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2934 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2935 u32 size = new_size - found_key.offset;
2937 if (root->ref_cows) {
2938 inode_sub_bytes(inode, item_end + 1 -
2942 btrfs_file_extent_calc_inline_size(size);
2943 ret = btrfs_truncate_item(trans, root, path,
2946 } else if (root->ref_cows) {
2947 inode_sub_bytes(inode, item_end + 1 -
2953 if (!pending_del_nr) {
2954 /* no pending yet, add ourselves */
2955 pending_del_slot = path->slots[0];
2957 } else if (pending_del_nr &&
2958 path->slots[0] + 1 == pending_del_slot) {
2959 /* hop on the pending chunk */
2961 pending_del_slot = path->slots[0];
2968 if (found_extent && root->ref_cows) {
2969 btrfs_set_path_blocking(path);
2970 ret = btrfs_free_extent(trans, root, extent_start,
2971 extent_num_bytes, 0,
2972 btrfs_header_owner(leaf),
2973 inode->i_ino, extent_offset);
2977 if (path->slots[0] == 0) {
2980 btrfs_release_path(root, path);
2981 if (found_type == BTRFS_INODE_ITEM_KEY)
2987 if (pending_del_nr &&
2988 path->slots[0] + 1 != pending_del_slot) {
2989 struct btrfs_key debug;
2991 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2993 ret = btrfs_del_items(trans, root, path,
2998 btrfs_release_path(root, path);
2999 if (found_type == BTRFS_INODE_ITEM_KEY)
3006 if (pending_del_nr) {
3007 ret = btrfs_del_items(trans, root, path, pending_del_slot,
3010 btrfs_free_path(path);
3015 * taken from block_truncate_page, but does COW as it zeros out
3016 * any bytes left in the last page in the file.
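/*
 * Illustrative note (annotation, not original code): btrfs_cont_expand()
 * below calls
 *
 *     btrfs_truncate_page(inode->i_mapping, inode->i_size);
 *
 * before punching hole extents past i_size, so the tail of the last
 * partial block is zeroed and stale data never shows up in the newly
 * exposed range.
 */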
3018 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3020 struct inode *inode = mapping->host;
3021 struct btrfs_root *root = BTRFS_I(inode)->root;
3022 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3023 struct btrfs_ordered_extent *ordered;
3025 u32 blocksize = root->sectorsize;
3026 pgoff_t index = from >> PAGE_CACHE_SHIFT;
3027 unsigned offset = from & (PAGE_CACHE_SIZE-1);
3033 if ((offset & (blocksize - 1)) == 0)
3038 page = grab_cache_page(mapping, index);
3042 page_start = page_offset(page);
3043 page_end = page_start + PAGE_CACHE_SIZE - 1;
3045 if (!PageUptodate(page)) {
3046 ret = btrfs_readpage(NULL, page);
3048 if (page->mapping != mapping) {
3050 page_cache_release(page);
3053 if (!PageUptodate(page)) {
3058 wait_on_page_writeback(page);
3060 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3061 set_page_extent_mapped(page);
3063 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3065 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3067 page_cache_release(page);
3068 btrfs_start_ordered_extent(inode, ordered, 1);
3069 btrfs_put_ordered_extent(ordered);
3073 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
3075 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3080 if (offset != PAGE_CACHE_SIZE) {
3082 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
3083 flush_dcache_page(page);
3086 ClearPageChecked(page);
3087 set_page_dirty(page);
3088 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3092 page_cache_release(page);
3097 int btrfs_cont_expand(struct inode *inode, loff_t size)
3099 struct btrfs_trans_handle *trans;
3100 struct btrfs_root *root = BTRFS_I(inode)->root;
3101 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3102 struct extent_map *em;
3103 u64 mask = root->sectorsize - 1;
3104 u64 hole_start = (inode->i_size + mask) & ~mask;
3105 u64 block_end = (size + mask) & ~mask;
3111 if (size <= hole_start)
3114 btrfs_truncate_page(inode->i_mapping, inode->i_size);
3117 struct btrfs_ordered_extent *ordered;
3118 btrfs_wait_ordered_range(inode, hole_start,
3119 block_end - hole_start);
3120 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3121 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3124 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3125 btrfs_put_ordered_extent(ordered);
3128 trans = btrfs_start_transaction(root, 1);
3129 btrfs_set_trans_block_group(trans, inode);
3131 cur_offset = hole_start;
3133 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3134 block_end - cur_offset, 0);
3135 BUG_ON(IS_ERR(em) || !em);
3136 last_byte = min(extent_map_end(em), block_end);
3137 last_byte = (last_byte + mask) & ~mask;
3138 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
3140 hole_size = last_byte - cur_offset;
3141 err = btrfs_drop_extents(trans, root, inode,
3143 cur_offset + hole_size,
3145 cur_offset, &hint_byte, 1);
3149 err = btrfs_reserve_metadata_space(root, 1);
3153 err = btrfs_insert_file_extent(trans, root,
3154 inode->i_ino, cur_offset, 0,
3155 0, hole_size, 0, hole_size,
3157 btrfs_drop_extent_cache(inode, hole_start,
3159 btrfs_unreserve_metadata_space(root, 1);
3161 free_extent_map(em);
3162 cur_offset = last_byte;
3163 if (err || cur_offset >= block_end)
3167 btrfs_end_transaction(trans, root);
3168 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3172 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3174 struct inode *inode = dentry->d_inode;
3177 err = inode_change_ok(inode, attr);
3181 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3182 if (attr->ia_size > inode->i_size) {
3183 err = btrfs_cont_expand(inode, attr->ia_size);
3186 } else if (inode->i_size > 0 &&
3187 attr->ia_size == 0) {
3189 /* we're truncating a file that used to have good
3190 * data down to zero. Make sure it gets into
3191 * the ordered flush list so that any new writes
3192 * get down to disk quickly.
3194 BTRFS_I(inode)->ordered_data_close = 1;
3198 err = inode_setattr(inode, attr);
3200 if (!err && ((attr->ia_valid & ATTR_MODE)))
3201 err = btrfs_acl_chmod(inode);
3205 void btrfs_delete_inode(struct inode *inode)
3207 struct btrfs_trans_handle *trans;
3208 struct btrfs_root *root = BTRFS_I(inode)->root;
3212 truncate_inode_pages(&inode->i_data, 0);
3213 if (is_bad_inode(inode)) {
3214 btrfs_orphan_del(NULL, inode);
3217 btrfs_wait_ordered_range(inode, 0, (u64)-1);
3219 if (inode->i_nlink > 0) {
3220 BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3224 btrfs_i_size_write(inode, 0);
3225 trans = btrfs_join_transaction(root, 1);
3227 btrfs_set_trans_block_group(trans, inode);
3228 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
3230 btrfs_orphan_del(NULL, inode);
3231 goto no_delete_lock;
3234 btrfs_orphan_del(trans, inode);
3236 nr = trans->blocks_used;
3239 btrfs_end_transaction(trans, root);
3240 btrfs_btree_balance_dirty(root, nr);
3244 nr = trans->blocks_used;
3245 btrfs_end_transaction(trans, root);
3246 btrfs_btree_balance_dirty(root, nr);
3252 * this returns the key found in the dir entry in the location pointer.
3253 * If no dir entries were found, location->objectid is 0.
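/*
 * Illustrative usage sketch (annotation, not original code), as in
 * btrfs_lookup_dentry() below:
 *
 *     ret = btrfs_inode_by_name(dir, dentry, &location);
 *     if (location.objectid == 0)
 *             ... name was not found ...
 *     else if (location.type == BTRFS_INODE_ITEM_KEY)
 *             inode = btrfs_iget(dir->i_sb, &location, root);
 */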
3255 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3256 struct btrfs_key *location)
3258 const char *name = dentry->d_name.name;
3259 int namelen = dentry->d_name.len;
3260 struct btrfs_dir_item *di;
3261 struct btrfs_path *path;
3262 struct btrfs_root *root = BTRFS_I(dir)->root;
3265 path = btrfs_alloc_path();
3268 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
3273 if (!di || IS_ERR(di))
3276 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3278 btrfs_free_path(path);
3281 location->objectid = 0;
3286 * when we hit a tree root in a directory, the btrfs part of the inode
3287 * needs to be changed to reflect the root directory of the tree root. This
3288 * is kind of like crossing a mount point.
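/*
 * Illustrative note (annotation, not original code): on success the
 * caller gets a new *sub_root and the location key rewritten to
 *
 *     (btrfs_root_dirid(&new_root->root_item), BTRFS_INODE_ITEM_KEY, 0)
 *
 * which btrfs_iget() can then resolve inside the subvolume, much like
 * stepping across a mount point into a different tree.
 */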
3290 static int fixup_tree_root_location(struct btrfs_root *root,
3292 struct dentry *dentry,
3293 struct btrfs_key *location,
3294 struct btrfs_root **sub_root)
3296 struct btrfs_path *path;
3297 struct btrfs_root *new_root;
3298 struct btrfs_root_ref *ref;
3299 struct extent_buffer *leaf;
3303 path = btrfs_alloc_path();
3310 ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
3311 BTRFS_I(dir)->root->root_key.objectid,
3312 location->objectid);
3319 leaf = path->nodes[0];
3320 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3321 if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
3322 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3325 ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
3326 (unsigned long)(ref + 1),
3327 dentry->d_name.len);
3331 btrfs_release_path(root->fs_info->tree_root, path);
3333 new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3334 if (IS_ERR(new_root)) {
3335 err = PTR_ERR(new_root);
3339 if (btrfs_root_refs(&new_root->root_item) == 0) {
3344 *sub_root = new_root;
3345 location->objectid = btrfs_root_dirid(&new_root->root_item);
3346 location->type = BTRFS_INODE_ITEM_KEY;
3347 location->offset = 0;
3350 btrfs_free_path(path);
3354 static void inode_tree_add(struct inode *inode)
3356 struct btrfs_root *root = BTRFS_I(inode)->root;
3357 struct btrfs_inode *entry;
3359 struct rb_node *parent;
3361 p = &root->inode_tree.rb_node;
3364 if (hlist_unhashed(&inode->i_hash))
3367 spin_lock(&root->inode_lock);
3370 entry = rb_entry(parent, struct btrfs_inode, rb_node);
3372 if (inode->i_ino < entry->vfs_inode.i_ino)
3373 p = &parent->rb_left;
3374 else if (inode->i_ino > entry->vfs_inode.i_ino)
3375 p = &parent->rb_right;
3377 WARN_ON(!(entry->vfs_inode.i_state &
3378 (I_WILL_FREE | I_FREEING | I_CLEAR)));
3379 rb_erase(parent, &root->inode_tree);
3380 RB_CLEAR_NODE(parent);
3381 spin_unlock(&root->inode_lock);
3385 rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3386 rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3387 spin_unlock(&root->inode_lock);
3390 static void inode_tree_del(struct inode *inode)
3392 struct btrfs_root *root = BTRFS_I(inode)->root;
3395 spin_lock(&root->inode_lock);
3396 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3397 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3398 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3399 empty = RB_EMPTY_ROOT(&root->inode_tree);
3401 spin_unlock(&root->inode_lock);
3403 if (empty && btrfs_root_refs(&root->root_item) == 0) {
3404 synchronize_srcu(&root->fs_info->subvol_srcu);
3405 spin_lock(&root->inode_lock);
3406 empty = RB_EMPTY_ROOT(&root->inode_tree);
3407 spin_unlock(&root->inode_lock);
3409 btrfs_add_dead_root(root);
3413 int btrfs_invalidate_inodes(struct btrfs_root *root)
3415 struct rb_node *node;
3416 struct rb_node *prev;
3417 struct btrfs_inode *entry;
3418 struct inode *inode;
3421 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
3423 spin_lock(&root->inode_lock);
3425 node = root->inode_tree.rb_node;
3429 entry = rb_entry(node, struct btrfs_inode, rb_node);
3431 if (objectid < entry->vfs_inode.i_ino)
3432 node = node->rb_left;
3433 else if (objectid > entry->vfs_inode.i_ino)
3434 node = node->rb_right;
3440 entry = rb_entry(prev, struct btrfs_inode, rb_node);
3441 if (objectid <= entry->vfs_inode.i_ino) {
3445 prev = rb_next(prev);
3449 entry = rb_entry(node, struct btrfs_inode, rb_node);
3450 objectid = entry->vfs_inode.i_ino + 1;
3451 inode = igrab(&entry->vfs_inode);
3453 spin_unlock(&root->inode_lock);
3454 if (atomic_read(&inode->i_count) > 1)
3455 d_prune_aliases(inode);
3457 * btrfs_drop_inode will remove it from
3458 * the inode cache when its usage count hits zero
3463 spin_lock(&root->inode_lock);
3467 if (cond_resched_lock(&root->inode_lock))
3470 node = rb_next(node);
3472 spin_unlock(&root->inode_lock);
3476 static noinline void init_btrfs_i(struct inode *inode)
3478 struct btrfs_inode *bi = BTRFS_I(inode);
3483 bi->last_sub_trans = 0;
3484 bi->logged_trans = 0;
3485 bi->delalloc_bytes = 0;
3486 bi->reserved_bytes = 0;
3487 bi->disk_i_size = 0;
3489 bi->index_cnt = (u64)-1;
3490 bi->last_unlink_trans = 0;
3491 bi->ordered_data_close = 0;
3492 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3493 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3494 inode->i_mapping, GFP_NOFS);
3495 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3496 inode->i_mapping, GFP_NOFS);
3497 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3498 INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3499 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3500 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3501 mutex_init(&BTRFS_I(inode)->extent_mutex);
3502 mutex_init(&BTRFS_I(inode)->log_mutex);
3505 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3507 struct btrfs_iget_args *args = p;
3508 inode->i_ino = args->ino;
3509 init_btrfs_i(inode);
3510 BTRFS_I(inode)->root = args->root;
3511 btrfs_set_inode_space_info(args->root, inode);
3515 static int btrfs_find_actor(struct inode *inode, void *opaque)
3517 struct btrfs_iget_args *args = opaque;
3518 return args->ino == inode->i_ino &&
3519 args->root == BTRFS_I(inode)->root;
3522 static struct inode *btrfs_iget_locked(struct super_block *s,
3524 struct btrfs_root *root)
3526 struct inode *inode;
3527 struct btrfs_iget_args args;
3528 args.ino = objectid;
3531 inode = iget5_locked(s, objectid, btrfs_find_actor,
3532 btrfs_init_locked_inode,
3537 /* Get an inode object given its location and corresponding root.
3538 * The inode is read from disk if it is not already in the inode cache.
3540 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3541 struct btrfs_root *root)
3543 struct inode *inode;
3545 inode = btrfs_iget_locked(s, location->objectid, root);
3547 return ERR_PTR(-ENOMEM);
3549 if (inode->i_state & I_NEW) {
3550 BTRFS_I(inode)->root = root;
3551 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3552 btrfs_read_locked_inode(inode);
3554 inode_tree_add(inode);
3555 unlock_new_inode(inode);
3561 static struct inode *new_simple_dir(struct super_block *s,
3562 struct btrfs_key *key,
3563 struct btrfs_root *root)
3565 struct inode *inode = new_inode(s);
3568 return ERR_PTR(-ENOMEM);
3570 init_btrfs_i(inode);
3572 BTRFS_I(inode)->root = root;
3573 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
3574 BTRFS_I(inode)->dummy_inode = 1;
3576 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
3577 inode->i_op = &simple_dir_inode_operations;
3578 inode->i_fop = &simple_dir_operations;
3579 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
3580 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3585 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3587 struct inode *inode;
3588 struct btrfs_root *root = BTRFS_I(dir)->root;
3589 struct btrfs_root *sub_root = root;
3590 struct btrfs_key location;
3594 dentry->d_op = &btrfs_dentry_operations;
3596 if (dentry->d_name.len > BTRFS_NAME_LEN)
3597 return ERR_PTR(-ENAMETOOLONG);
3599 ret = btrfs_inode_by_name(dir, dentry, &location);
3602 return ERR_PTR(ret);
3604 if (location.objectid == 0)
3607 if (location.type == BTRFS_INODE_ITEM_KEY) {
3608 inode = btrfs_iget(dir->i_sb, &location, root);
3612 BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
3614 index = srcu_read_lock(&root->fs_info->subvol_srcu);
3615 ret = fixup_tree_root_location(root, dir, dentry,
3616 &location, &sub_root);
3619 inode = ERR_PTR(ret);
3621 inode = new_simple_dir(dir->i_sb, &location, sub_root);
3623 inode = btrfs_iget(dir->i_sb, &location, sub_root);
3625 srcu_read_unlock(&root->fs_info->subvol_srcu, index);
3630 static int btrfs_dentry_delete(struct dentry *dentry)
3632 struct btrfs_root *root;
3634 if (!dentry->d_inode && !IS_ROOT(dentry))
3635 dentry = dentry->d_parent;
3637 if (dentry->d_inode) {
3638 root = BTRFS_I(dentry->d_inode)->root;
3639 if (btrfs_root_refs(&root->root_item) == 0)
3645 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3646 struct nameidata *nd)
3648 struct inode *inode;
3650 inode = btrfs_lookup_dentry(dir, dentry);
3652 return ERR_CAST(inode);
3654 return d_splice_alias(inode, dentry);
3657 static unsigned char btrfs_filetype_table[] = {
3658 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3661 static int btrfs_real_readdir(struct file *filp, void *dirent,
3664 struct inode *inode = filp->f_dentry->d_inode;
3665 struct btrfs_root *root = BTRFS_I(inode)->root;
3666 struct btrfs_item *item;
3667 struct btrfs_dir_item *di;
3668 struct btrfs_key key;
3669 struct btrfs_key found_key;
3670 struct btrfs_path *path;
3673 struct extent_buffer *leaf;
3676 unsigned char d_type;
3681 int key_type = BTRFS_DIR_INDEX_KEY;
3686 /* FIXME, use a real flag for deciding about the key type */
3687 if (root->fs_info->tree_root == root)
3688 key_type = BTRFS_DIR_ITEM_KEY;
3690 /* special case for "." */
3691 if (filp->f_pos == 0) {
3692 over = filldir(dirent, ".", 1,
3699 /* special case for .., just use the back ref */
3700 if (filp->f_pos == 1) {
3701 u64 pino = parent_ino(filp->f_path.dentry);
3702 over = filldir(dirent, "..", 2,
3708 path = btrfs_alloc_path();
3711 btrfs_set_key_type(&key, key_type);
3712 key.offset = filp->f_pos;
3713 key.objectid = inode->i_ino;
3715 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3721 leaf = path->nodes[0];
3722 nritems = btrfs_header_nritems(leaf);
3723 slot = path->slots[0];
3724 if (advance || slot >= nritems) {
3725 if (slot >= nritems - 1) {
3726 ret = btrfs_next_leaf(root, path);
3729 leaf = path->nodes[0];
3730 nritems = btrfs_header_nritems(leaf);
3731 slot = path->slots[0];
3739 item = btrfs_item_nr(leaf, slot);
3740 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3742 if (found_key.objectid != key.objectid)
3744 if (btrfs_key_type(&found_key) != key_type)
3746 if (found_key.offset < filp->f_pos)
3749 filp->f_pos = found_key.offset;
3751 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3753 di_total = btrfs_item_size(leaf, item);
3755 while (di_cur < di_total) {
3756 struct btrfs_key location;
3758 name_len = btrfs_dir_name_len(leaf, di);
3759 if (name_len <= sizeof(tmp_name)) {
3760 name_ptr = tmp_name;
3762 name_ptr = kmalloc(name_len, GFP_NOFS);
3768 read_extent_buffer(leaf, name_ptr,
3769 (unsigned long)(di + 1), name_len);
3771 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3772 btrfs_dir_item_key_to_cpu(leaf, di, &location);
3774 /* is this a reference to our own snapshot? If so, skip it */
3777 if (location.type == BTRFS_ROOT_ITEM_KEY &&
3778 location.objectid == root->root_key.objectid) {
3782 over = filldir(dirent, name_ptr, name_len,
3783 found_key.offset, location.objectid,
3787 if (name_ptr != tmp_name)
3792 di_len = btrfs_dir_name_len(leaf, di) +
3793 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3795 di = (struct btrfs_dir_item *)((char *)di + di_len);
3799 /* Reached end of directory/root. Bump pos past the last item. */
3800 if (key_type == BTRFS_DIR_INDEX_KEY)
3801 filp->f_pos = INT_LIMIT(off_t);
3807 btrfs_free_path(path);
3811 int btrfs_write_inode(struct inode *inode, int wait)
3813 struct btrfs_root *root = BTRFS_I(inode)->root;
3814 struct btrfs_trans_handle *trans;
3817 if (root->fs_info->btree_inode == inode)
3821 trans = btrfs_join_transaction(root, 1);
3822 btrfs_set_trans_block_group(trans, inode);
3823 ret = btrfs_commit_transaction(trans, root);
3829 * This is somewhat expensive, updating the tree every time the
3830 * inode changes. But, it is most likely to find the inode in cache.
3831 * FIXME, needs more benchmarking... there are no reasons other than performance
3832 * to keep or drop this code.
3834 void btrfs_dirty_inode(struct inode *inode)
3836 struct btrfs_root *root = BTRFS_I(inode)->root;
3837 struct btrfs_trans_handle *trans;
3839 trans = btrfs_join_transaction(root, 1);
3840 btrfs_set_trans_block_group(trans, inode);
3841 btrfs_update_inode(trans, root, inode);
3842 btrfs_end_transaction(trans, root);
3846 * find the highest existing sequence number in a directory
3847 * and then set the in-memory index_cnt variable to reflect
3848 * free sequence numbers
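/*
 * Illustrative example (annotation, not original code): if the highest
 * existing key for this directory is (dir_ino, BTRFS_DIR_INDEX_KEY, 57),
 * index_cnt becomes 58 and the next btrfs_set_inode_index() call hands
 * out 58. Empty or freshly created directories start at 2, leaving 0 and
 * 1 for '.' and '..' in f_pos terms.
 */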
3850 static int btrfs_set_inode_index_count(struct inode *inode)
3852 struct btrfs_root *root = BTRFS_I(inode)->root;
3853 struct btrfs_key key, found_key;
3854 struct btrfs_path *path;
3855 struct extent_buffer *leaf;
3858 key.objectid = inode->i_ino;
3859 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3860 key.offset = (u64)-1;
3862 path = btrfs_alloc_path();
3866 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3869 /* FIXME: we should be able to handle this */
3875 * MAGIC NUMBER EXPLANATION:
3876 * since we search a directory based on f_pos, we have to start at 2:
3877 * '.' and '..' have f_pos of 0 and 1 respectively, so everything
3878 * else has to start at 2
3880 if (path->slots[0] == 0) {
3881 BTRFS_I(inode)->index_cnt = 2;
3887 leaf = path->nodes[0];
3888 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3890 if (found_key.objectid != inode->i_ino ||
3891 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
3892 BTRFS_I(inode)->index_cnt = 2;
3896 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
3898 btrfs_free_path(path);
3903 * helper to find a free sequence number in a given directory. The current
3904 * code is very simple; later versions will do smarter things in the btree
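/*
 * Illustrative usage sketch (annotation, not original code), following
 * what btrfs_link() below does:
 *
 *     u64 index;
 *     err = btrfs_set_inode_index(dir, &index);
 *     if (!err)
 *             err = btrfs_add_link(trans, dir, inode, name, name_len,
 *                                  1, index);
 *
 * where the 1 asks btrfs_add_link() to also insert the inode backref.
 */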
3906 int btrfs_set_inode_index(struct inode *dir, u64 *index)
3910 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
3911 ret = btrfs_set_inode_index_count(dir);
3916 *index = BTRFS_I(dir)->index_cnt;
3917 BTRFS_I(dir)->index_cnt++;
3922 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3923 struct btrfs_root *root,
3925 const char *name, int name_len,
3926 u64 ref_objectid, u64 objectid,
3927 u64 alloc_hint, int mode, u64 *index)
3929 struct inode *inode;
3930 struct btrfs_inode_item *inode_item;
3931 struct btrfs_key *location;
3932 struct btrfs_path *path;
3933 struct btrfs_inode_ref *ref;
3934 struct btrfs_key key[2];
3940 path = btrfs_alloc_path();
3943 inode = new_inode(root->fs_info->sb);
3945 return ERR_PTR(-ENOMEM);
3948 ret = btrfs_set_inode_index(dir, index);
3951 return ERR_PTR(ret);
3955 * index_cnt is ignored for everything but a dir,
3956 * btrfs_set_inode_index_count has an explanation for the magic
3959 init_btrfs_i(inode);
3960 BTRFS_I(inode)->index_cnt = 2;
3961 BTRFS_I(inode)->root = root;
3962 BTRFS_I(inode)->generation = trans->transid;
3963 btrfs_set_inode_space_info(root, inode);
3969 BTRFS_I(inode)->block_group =
3970 btrfs_find_block_group(root, 0, alloc_hint, owner);
3972 key[0].objectid = objectid;
3973 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
3976 key[1].objectid = objectid;
3977 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
3978 key[1].offset = ref_objectid;
3980 sizes[0] = sizeof(struct btrfs_inode_item);
3981 sizes[1] = name_len + sizeof(*ref);
3983 path->leave_spinning = 1;
3984 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
3988 inode->i_uid = current_fsuid();
3990 if (dir && (dir->i_mode & S_ISGID)) {
3991 inode->i_gid = dir->i_gid;
3995 inode->i_gid = current_fsgid();
3997 inode->i_mode = mode;
3998 inode->i_ino = objectid;
3999 inode_set_bytes(inode, 0);
4000 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4001 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4002 struct btrfs_inode_item);
4003 fill_inode_item(trans, path->nodes[0], inode_item, inode);
4005 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
4006 struct btrfs_inode_ref);
4007 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4008 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4009 ptr = (unsigned long)(ref + 1);
4010 write_extent_buffer(path->nodes[0], name, ptr, name_len);
4012 btrfs_mark_buffer_dirty(path->nodes[0]);
4013 btrfs_free_path(path);
4015 location = &BTRFS_I(inode)->location;
4016 location->objectid = objectid;
4017 location->offset = 0;
4018 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
4020 btrfs_inherit_iflags(inode, dir);
4022 if ((mode & S_IFREG)) {
4023 if (btrfs_test_opt(root, NODATASUM))
4024 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4025 if (btrfs_test_opt(root, NODATACOW))
4026 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4029 insert_inode_hash(inode);
4030 inode_tree_add(inode);
4034 BTRFS_I(dir)->index_cnt--;
4035 btrfs_free_path(path);
4037 return ERR_PTR(ret);
4040 static inline u8 btrfs_inode_type(struct inode *inode)
4042 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
4046 * utility function to add 'inode' into 'parent_inode' with
4047 * a given name and a given sequence number.
4048 * if 'add_backref' is true, also insert a backref from the
4049 * inode to the parent directory.
4051 int btrfs_add_link(struct btrfs_trans_handle *trans,
4052 struct inode *parent_inode, struct inode *inode,
4053 const char *name, int name_len, int add_backref, u64 index)
4056 struct btrfs_key key;
4057 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4059 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4060 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4062 key.objectid = inode->i_ino;
4063 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4067 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4068 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4069 key.objectid, root->root_key.objectid,
4070 parent_inode->i_ino,
4071 index, name, name_len);
4072 } else if (add_backref) {
4073 ret = btrfs_insert_inode_ref(trans, root,
4074 name, name_len, inode->i_ino,
4075 parent_inode->i_ino, index);
4079 ret = btrfs_insert_dir_item(trans, root, name, name_len,
4080 parent_inode->i_ino, &key,
4081 btrfs_inode_type(inode), index);
4084 btrfs_i_size_write(parent_inode, parent_inode->i_size +
4086 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4087 ret = btrfs_update_inode(trans, root, parent_inode);
4092 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4093 struct dentry *dentry, struct inode *inode,
4094 int backref, u64 index)
4096 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4097 inode, dentry->d_name.name,
4098 dentry->d_name.len, backref, index);
4100 d_instantiate(dentry, inode);
4108 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4109 int mode, dev_t rdev)
4111 struct btrfs_trans_handle *trans;
4112 struct btrfs_root *root = BTRFS_I(dir)->root;
4113 struct inode *inode = NULL;
4117 unsigned long nr = 0;
4120 if (!new_valid_dev(rdev))
4124 * 2 for inode item and ref
4126 * 1 for xattr if selinux is on
4128 err = btrfs_reserve_metadata_space(root, 5);
4132 trans = btrfs_start_transaction(root, 1);
4135 btrfs_set_trans_block_group(trans, dir);
4137 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4143 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4145 dentry->d_parent->d_inode->i_ino, objectid,
4146 BTRFS_I(dir)->block_group, mode, &index);
4147 err = PTR_ERR(inode);
4151 err = btrfs_init_inode_security(inode, dir);
4157 btrfs_set_trans_block_group(trans, inode);
4158 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4162 inode->i_op = &btrfs_special_inode_operations;
4163 init_special_inode(inode, inode->i_mode, rdev);
4164 btrfs_update_inode(trans, root, inode);
4166 btrfs_update_inode_block_group(trans, inode);
4167 btrfs_update_inode_block_group(trans, dir);
4169 nr = trans->blocks_used;
4170 btrfs_end_transaction_throttle(trans, root);
4172 btrfs_unreserve_metadata_space(root, 5);
4174 inode_dec_link_count(inode);
4177 btrfs_btree_balance_dirty(root, nr);
4181 static int btrfs_create(struct inode *dir, struct dentry *dentry,
4182 int mode, struct nameidata *nd)
4184 struct btrfs_trans_handle *trans;
4185 struct btrfs_root *root = BTRFS_I(dir)->root;
4186 struct inode *inode = NULL;
4189 unsigned long nr = 0;
4194 * 2 for inode item and ref
4196 * 1 for xattr if selinux is on
4198 err = btrfs_reserve_metadata_space(root, 5);
4202 trans = btrfs_start_transaction(root, 1);
4205 btrfs_set_trans_block_group(trans, dir);
4207 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4213 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4215 dentry->d_parent->d_inode->i_ino,
4216 objectid, BTRFS_I(dir)->block_group, mode,
4218 err = PTR_ERR(inode);
4222 err = btrfs_init_inode_security(inode, dir);
4228 btrfs_set_trans_block_group(trans, inode);
4229 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4233 inode->i_mapping->a_ops = &btrfs_aops;
4234 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4235 inode->i_fop = &btrfs_file_operations;
4236 inode->i_op = &btrfs_file_inode_operations;
4237 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4239 btrfs_update_inode_block_group(trans, inode);
4240 btrfs_update_inode_block_group(trans, dir);
4242 nr = trans->blocks_used;
4243 btrfs_end_transaction_throttle(trans, root);
4245 btrfs_unreserve_metadata_space(root, 5);
4247 inode_dec_link_count(inode);
4250 btrfs_btree_balance_dirty(root, nr);
4254 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4255 struct dentry *dentry)
4257 struct btrfs_trans_handle *trans;
4258 struct btrfs_root *root = BTRFS_I(dir)->root;
4259 struct inode *inode = old_dentry->d_inode;
4261 unsigned long nr = 0;
4265 if (inode->i_nlink == 0)
4269 * 1 item for inode ref
4270 * 2 items for dir items
4272 err = btrfs_reserve_metadata_space(root, 3);
4276 btrfs_inc_nlink(inode);
4278 err = btrfs_set_inode_index(dir, &index);
4282 trans = btrfs_start_transaction(root, 1);
4284 btrfs_set_trans_block_group(trans, dir);
4285 atomic_inc(&inode->i_count);
4287 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
4292 btrfs_update_inode_block_group(trans, dir);
4293 err = btrfs_update_inode(trans, root, inode);
4295 btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
4298 nr = trans->blocks_used;
4299 btrfs_end_transaction_throttle(trans, root);
4301 btrfs_unreserve_metadata_space(root, 3);
4303 inode_dec_link_count(inode);
4306 btrfs_btree_balance_dirty(root, nr);
4310 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4312 struct inode *inode = NULL;
4313 struct btrfs_trans_handle *trans;
4314 struct btrfs_root *root = BTRFS_I(dir)->root;
4316 int drop_on_err = 0;
4319 unsigned long nr = 1;
4322 * 2 items for inode and ref
4323 * 2 items for dir items
4324 * 1 for xattr if selinux is on
4326 err = btrfs_reserve_metadata_space(root, 5);
4330 trans = btrfs_start_transaction(root, 1);
4335 btrfs_set_trans_block_group(trans, dir);
4337 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4343 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4345 dentry->d_parent->d_inode->i_ino, objectid,
4346 BTRFS_I(dir)->block_group, S_IFDIR | mode,
4348 if (IS_ERR(inode)) {
4349 err = PTR_ERR(inode);
4355 err = btrfs_init_inode_security(inode, dir);
4359 inode->i_op = &btrfs_dir_inode_operations;
4360 inode->i_fop = &btrfs_dir_file_operations;
4361 btrfs_set_trans_block_group(trans, inode);
4363 btrfs_i_size_write(inode, 0);
4364 err = btrfs_update_inode(trans, root, inode);
4368 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4369 inode, dentry->d_name.name,
4370 dentry->d_name.len, 0, index);
4374 d_instantiate(dentry, inode);
4376 btrfs_update_inode_block_group(trans, inode);
4377 btrfs_update_inode_block_group(trans, dir);
4380 nr = trans->blocks_used;
4381 btrfs_end_transaction_throttle(trans, root);
4384 btrfs_unreserve_metadata_space(root, 5);
4387 btrfs_btree_balance_dirty(root, nr);
4391 /* helper for btrfs_get_extent. Given an existing extent in the tree,
4392 * and an extent that you want to insert, deal with overlap and insert
4393 * the new extent into the tree.
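/*
 * Illustrative example (annotation, not original code): if em describes
 * [0, 128K) but the range actually asked for starts at map_start = 64K
 * and an overlapping mapping already sits in the tree, em is trimmed so
 * it begins at 64K: start_diff = 64K is added to block_start and
 * subtracted from block_len (for ordinary, non-compressed extents)
 * before the trimmed mapping is inserted.
 */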
4395 static int merge_extent_mapping(struct extent_map_tree *em_tree,
4396 struct extent_map *existing,
4397 struct extent_map *em,
4398 u64 map_start, u64 map_len)
4402 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
4403 start_diff = map_start - em->start;
4404 em->start = map_start;
4406 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
4407 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
4408 em->block_start += start_diff;
4409 em->block_len -= start_diff;
4411 return add_extent_mapping(em_tree, em);
4414 static noinline int uncompress_inline(struct btrfs_path *path,
4415 struct inode *inode, struct page *page,
4416 size_t pg_offset, u64 extent_offset,
4417 struct btrfs_file_extent_item *item)
4420 struct extent_buffer *leaf = path->nodes[0];
4423 unsigned long inline_size;
4426 WARN_ON(pg_offset != 0);
4427 max_size = btrfs_file_extent_ram_bytes(leaf, item);
4428 inline_size = btrfs_file_extent_inline_item_len(leaf,
4429 btrfs_item_nr(leaf, path->slots[0]));
4430 tmp = kmalloc(inline_size, GFP_NOFS);
4431 ptr = btrfs_file_extent_inline_start(item);
4433 read_extent_buffer(leaf, tmp, ptr, inline_size);
4435 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
4436 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
4437 inline_size, max_size);
4439 char *kaddr = kmap_atomic(page, KM_USER0);
4440 unsigned long copy_size = min_t(u64,
4441 PAGE_CACHE_SIZE - pg_offset,
4442 max_size - extent_offset);
4443 memset(kaddr + pg_offset, 0, copy_size);
4444 kunmap_atomic(kaddr, KM_USER0);
4451 * a bit scary, this does extent mapping from logical file offset to the disk.
4452 * the ugly parts come from merging extents from the disk with the in-ram
4453 * representation. This gets more complex because of the data=ordered code,
4454 * where the in-ram extents might be locked pending data=ordered completion.
4456 * This also copies inline extents directly into the page.
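/*
 * Illustrative caller sketch (annotation, not original code), matching
 * the call in btrfs_cont_expand() above:
 *
 *     em = btrfs_get_extent(inode, NULL, 0, cur_offset,
 *                           block_end - cur_offset, 0);
 *
 * with page == NULL and create == 0 this is a pure mapping lookup; holes
 * come back as extents with block_start == EXTENT_MAP_HOLE instead of an
 * error.
 */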
4459 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
4460 size_t pg_offset, u64 start, u64 len,
4466 u64 extent_start = 0;
4468 u64 objectid = inode->i_ino;
4470 struct btrfs_path *path = NULL;
4471 struct btrfs_root *root = BTRFS_I(inode)->root;
4472 struct btrfs_file_extent_item *item;
4473 struct extent_buffer *leaf;
4474 struct btrfs_key found_key;
4475 struct extent_map *em = NULL;
4476 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4477 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4478 struct btrfs_trans_handle *trans = NULL;
4482 read_lock(&em_tree->lock);
4483 em = lookup_extent_mapping(em_tree, start, len);
4485 em->bdev = root->fs_info->fs_devices->latest_bdev;
4486 read_unlock(&em_tree->lock);
4489 if (em->start > start || em->start + em->len <= start)
4490 free_extent_map(em);
4491 else if (em->block_start == EXTENT_MAP_INLINE && page)
4492 free_extent_map(em);
4496 em = alloc_extent_map(GFP_NOFS);
4501 em->bdev = root->fs_info->fs_devices->latest_bdev;
4502 em->start = EXTENT_MAP_HOLE;
4503 em->orig_start = EXTENT_MAP_HOLE;
4505 em->block_len = (u64)-1;
4508 path = btrfs_alloc_path();
4512 ret = btrfs_lookup_file_extent(trans, root, path,
4513 objectid, start, trans != NULL);
4520 if (path->slots[0] == 0)
4525 leaf = path->nodes[0];
4526 item = btrfs_item_ptr(leaf, path->slots[0],
4527 struct btrfs_file_extent_item);
4528 /* are we inside the extent that was found? */
4529 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4530 found_type = btrfs_key_type(&found_key);
4531 if (found_key.objectid != objectid ||
4532 found_type != BTRFS_EXTENT_DATA_KEY) {
4536 found_type = btrfs_file_extent_type(leaf, item);
4537 extent_start = found_key.offset;
4538 compressed = btrfs_file_extent_compression(leaf, item);
4539 if (found_type == BTRFS_FILE_EXTENT_REG ||
4540 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4541 extent_end = extent_start +
4542 btrfs_file_extent_num_bytes(leaf, item);
4543 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4545 size = btrfs_file_extent_inline_len(leaf, item);
4546 extent_end = (extent_start + size + root->sectorsize - 1) &
4547 ~((u64)root->sectorsize - 1);
4550 if (start >= extent_end) {
4552 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4553 ret = btrfs_next_leaf(root, path);
4560 leaf = path->nodes[0];
4562 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4563 if (found_key.objectid != objectid ||
4564 found_key.type != BTRFS_EXTENT_DATA_KEY)
4566 if (start + len <= found_key.offset)
4569 em->len = found_key.offset - start;
4573 if (found_type == BTRFS_FILE_EXTENT_REG ||
4574 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4575 em->start = extent_start;
4576 em->len = extent_end - extent_start;
4577 em->orig_start = extent_start -
4578 btrfs_file_extent_offset(leaf, item);
4579 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4581 em->block_start = EXTENT_MAP_HOLE;
4585 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4586 em->block_start = bytenr;
4587 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4590 bytenr += btrfs_file_extent_offset(leaf, item);
4591 em->block_start = bytenr;
4592 em->block_len = em->len;
4593 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4594 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4597 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4601 size_t extent_offset;
4604 em->block_start = EXTENT_MAP_INLINE;
4605 if (!page || create) {
4606 em->start = extent_start;
4607 em->len = extent_end - extent_start;
4611 size = btrfs_file_extent_inline_len(leaf, item);
4612 extent_offset = page_offset(page) + pg_offset - extent_start;
4613 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4614 size - extent_offset);
4615 em->start = extent_start + extent_offset;
4616 em->len = (copy_size + root->sectorsize - 1) &
4617 ~((u64)root->sectorsize - 1);
4618 em->orig_start = EXTENT_MAP_INLINE;
4620 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4621 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4622 if (create == 0 && !PageUptodate(page)) {
4623 if (btrfs_file_extent_compression(leaf, item) ==
4624 BTRFS_COMPRESS_ZLIB) {
4625 ret = uncompress_inline(path, inode, page,
4627 extent_offset, item);
4631 read_extent_buffer(leaf, map + pg_offset, ptr,
4633 if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
4634 memset(map + pg_offset + copy_size, 0,
4635 PAGE_CACHE_SIZE - pg_offset -
4640 flush_dcache_page(page);
4641 } else if (create && PageUptodate(page)) {
4644 free_extent_map(em);
4646 btrfs_release_path(root, path);
4647 trans = btrfs_join_transaction(root, 1);
4651 write_extent_buffer(leaf, map + pg_offset, ptr,
4654 btrfs_mark_buffer_dirty(leaf);
4656 set_extent_uptodate(io_tree, em->start,
4657 extent_map_end(em) - 1, GFP_NOFS);
4660 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4667 em->block_start = EXTENT_MAP_HOLE;
4668 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4670 btrfs_release_path(root, path);
4671 if (em->start > start || extent_map_end(em) <= start) {
4672 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4673 "[%llu %llu]\n", (unsigned long long)em->start,
4674 (unsigned long long)em->len,
4675 (unsigned long long)start,
4676 (unsigned long long)len);
4682 write_lock(&em_tree->lock);
4683 ret = add_extent_mapping(em_tree, em);
4684 /* it is possible that someone inserted the extent into the tree
4685 * while we had the lock dropped. It is also possible that
4686 * an overlapping map exists in the tree
4688 if (ret == -EEXIST) {
4689 struct extent_map *existing;
4693 existing = lookup_extent_mapping(em_tree, start, len);
4694 if (existing && (existing->start > start ||
4695 existing->start + existing->len <= start)) {
4696 free_extent_map(existing);
4700 existing = lookup_extent_mapping(em_tree, em->start,
4703 err = merge_extent_mapping(em_tree, existing,
4706 free_extent_map(existing);
4708 free_extent_map(em);
4713 free_extent_map(em);
4717 free_extent_map(em);
4722 write_unlock(&em_tree->lock);
4725 btrfs_free_path(path);
4727 ret = btrfs_end_transaction(trans, root);
4732 free_extent_map(em);
4733 return ERR_PTR(err);
4738 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4739 const struct iovec *iov, loff_t offset,
4740 unsigned long nr_segs)
4745 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4746 __u64 start, __u64 len)
4748 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
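/*
 * readpage address space callback: read a single page through the inode's
 * extent io tree, using btrfs_get_extent to map file offsets to extents.
 */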
4751 int btrfs_readpage(struct file *file, struct page *page)
4753 struct extent_io_tree *tree;
4754 tree = &BTRFS_I(page->mapping->host)->io_tree;
4755 return extent_read_full_page(tree, page, btrfs_get_extent);
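/*
 * writepage callback: hand one dirty page to the extent io code.  When
 * called from memory reclaim context (PF_MEMALLOC) the page is redirtied
 * rather than written here.
 */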
4758 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4760 struct extent_io_tree *tree;
4763 if (current->flags & PF_MEMALLOC) {
4764 redirty_page_for_writepage(wbc, page);
4768 tree = &BTRFS_I(page->mapping->host)->io_tree;
4769 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4772 int btrfs_writepages(struct address_space *mapping,
4773 struct writeback_control *wbc)
4775 struct extent_io_tree *tree;
4777 tree = &BTRFS_I(mapping->host)->io_tree;
4778 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4782 btrfs_readpages(struct file *file, struct address_space *mapping,
4783 struct list_head *pages, unsigned nr_pages)
4785 struct extent_io_tree *tree;
4786 tree = &BTRFS_I(mapping->host)->io_tree;
4787 return extent_readpages(tree, mapping, pages, nr_pages,
4790 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4792 struct extent_io_tree *tree;
4793 struct extent_map_tree *map;
4796 tree = &BTRFS_I(page->mapping->host)->io_tree;
4797 map = &BTRFS_I(page->mapping->host)->extent_tree;
4798 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4800 ClearPagePrivate(page);
4801 set_page_private(page, 0);
4802 page_cache_release(page);
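/*
 * releasepage callback: refuse to release pages that are dirty or still
 * under writeback, otherwise fall through to __btrfs_releasepage.
 */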
4807 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4809 if (PageWriteback(page) || PageDirty(page))
4811 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
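/*
 * invalidatepage callback: the page is leaving the page cache, so any
 * pending ordered extent accounting for it has to be completed here,
 * because the IO that would normally finish it will never be started.
 */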
4814 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4816 struct extent_io_tree *tree;
4817 struct btrfs_ordered_extent *ordered;
4818 u64 page_start = page_offset(page);
4819 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4823 * we have the page locked, so new writeback can't start,
4824 * and the dirty bit won't be cleared while we are here.
4826 * Wait for IO on this page so that we can safely clear
4827 * the PagePrivate2 bit and do ordered accounting
4829 wait_on_page_writeback(page);
4831 tree = &BTRFS_I(page->mapping->host)->io_tree;
4833 btrfs_releasepage(page, GFP_NOFS);
4836 lock_extent(tree, page_start, page_end, GFP_NOFS);
4837 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
4841 * IO on this page will never be started, so we need
4842 * to account for any ordered extents now
4844 clear_extent_bit(tree, page_start, page_end,
4845 EXTENT_DIRTY | EXTENT_DELALLOC |
4846 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
4849 * whoever cleared the private bit is responsible
4850 * for the finish_ordered_io
4852 if (TestClearPagePrivate2(page)) {
4853 btrfs_finish_ordered_io(page->mapping->host,
4854 page_start, page_end);
4856 btrfs_put_ordered_extent(ordered);
4857 lock_extent(tree, page_start, page_end, GFP_NOFS);
4859 clear_extent_bit(tree, page_start, page_end,
4860 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4861 EXTENT_DO_ACCOUNTING, 1, 1, NULL, GFP_NOFS);
4862 __btrfs_releasepage(page, GFP_NOFS);
4864 ClearPageChecked(page);
4865 if (PagePrivate(page)) {
4866 ClearPagePrivate(page);
4867 set_page_private(page, 0);
4868 page_cache_release(page);
4873 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
4874 * called from a page fault handler when a page is first dirtied. Hence we must
4875 * be careful to check for EOF conditions here. We set the page up correctly
4876 * for a written page which means we get ENOSPC checking when writing into
4877 * holes and correct delalloc and unwritten extent mapping on filesystems that
4878 * support these features.
4880 * We are not allowed to take the i_mutex here so we have to play games to
4881 * protect against truncate races as the page could now be beyond EOF. Because
4882 * vmtruncate() writes the inode size before removing pages, once we have the
4883 * page lock we can determine safely if the page is beyond EOF. If it is not
4884 beyond EOF, then the page is guaranteed safe against truncation until we unlock the page.
4887 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4889 struct page *page = vmf->page;
4890 struct inode *inode = fdentry(vma->vm_file)->d_inode;
4891 struct btrfs_root *root = BTRFS_I(inode)->root;
4892 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4893 struct btrfs_ordered_extent *ordered;
4895 unsigned long zero_start;
4901 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
4905 else /* -ENOSPC, -EIO, etc */
4906 ret = VM_FAULT_SIGBUS;
4910 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
4912 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4913 ret = VM_FAULT_SIGBUS;
4917 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
4920 size = i_size_read(inode);
4921 page_start = page_offset(page);
4922 page_end = page_start + PAGE_CACHE_SIZE - 1;
4924 if ((page->mapping != inode->i_mapping) ||
4925 (page_start >= size)) {
4926 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4927 /* page got truncated out from underneath us */
4930 wait_on_page_writeback(page);
4932 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4933 set_page_extent_mapped(page);
4936 * we can't set the delalloc bits if there are pending ordered
4937 * extents. Drop our locks and wait for them to finish
4939 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4941 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4943 btrfs_start_ordered_extent(inode, ordered, 1);
4944 btrfs_put_ordered_extent(ordered);
4949 * XXX - page_mkwrite gets called every time the page is dirtied, even
4950 * if it was already dirty, so for space accounting reasons we need to
4951 * clear any delalloc bits for the range we are fixing to save. There
4952 * is probably a better way to do this, but for now keep consistent with
4953 * prepare_pages in the normal write path.
4955 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
4956 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
4959 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
4961 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4962 ret = VM_FAULT_SIGBUS;
4963 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4968 /* page is wholly or partially inside EOF */
4969 if (page_start + PAGE_CACHE_SIZE > size)
4970 zero_start = size & ~PAGE_CACHE_MASK;
4972 zero_start = PAGE_CACHE_SIZE;
4974 if (zero_start != PAGE_CACHE_SIZE) {
4976 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
4977 flush_dcache_page(page);
4980 ClearPageChecked(page);
4981 set_page_dirty(page);
4982 SetPageUptodate(page);
4984 BTRFS_I(inode)->last_trans = root->fs_info->generation;
4985 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
4987 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4990 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
4992 return VM_FAULT_LOCKED;
4998 static void btrfs_truncate(struct inode *inode)
5000 struct btrfs_root *root = BTRFS_I(inode)->root;
5002 struct btrfs_trans_handle *trans;
5004 u64 mask = root->sectorsize - 1;
5006 if (!S_ISREG(inode->i_mode))
5008 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
5011 btrfs_truncate_page(inode->i_mapping, inode->i_size);
5012 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
5014 trans = btrfs_start_transaction(root, 1);
5017 * setattr is responsible for setting the ordered_data_close flag,
5018 * but that is only tested during the last file release. That
5019 * could happen well after the next commit, leaving a great big
5020 * window where new writes may get lost if someone chooses to write
5021 * to this file after truncating to zero
5023 * The inode doesn't have any dirty data here, and so if we commit,
5024 * this is a noop. If someone immediately starts writing to the inode
5025 * it is very likely we'll catch some of their writes in this
5026 * transaction, and the commit will find this file on the ordered
5027 * data list with good things to send down.
5029 * This is a best-effort solution; there is still a window where
5030 * using truncate to replace the contents of the file will
5031 * end up with a zero length file after a crash.
5033 if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
5034 btrfs_add_ordered_operation(trans, root, inode);
5036 btrfs_set_trans_block_group(trans, inode);
5037 btrfs_i_size_write(inode, inode->i_size);
5039 ret = btrfs_orphan_add(trans, inode);
5042 /* FIXME, add redo link to tree so we don't leak on crash */
5043 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
5044 BTRFS_EXTENT_DATA_KEY);
5045 btrfs_update_inode(trans, root, inode);
5047 ret = btrfs_orphan_del(trans, inode);
5051 nr = trans->blocks_used;
5052 ret = btrfs_end_transaction_throttle(trans, root);
5054 btrfs_btree_balance_dirty(root, nr);
5058 * create a new subvolume directory/inode (helper for the ioctl).
5060 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
5061 struct btrfs_root *new_root,
5062 u64 new_dirid, u64 alloc_hint)
5064 struct inode *inode;
5068 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
5069 new_dirid, alloc_hint, S_IFDIR | 0700, &index);
5071 return PTR_ERR(inode);
5072 inode->i_op = &btrfs_dir_inode_operations;
5073 inode->i_fop = &btrfs_dir_file_operations;
5076 btrfs_i_size_write(inode, 0);
5078 err = btrfs_update_inode(trans, new_root, inode);
5085 /* helper function for file defrag and space balancing. This
5086 * forces readahead on a given range of bytes in an inode
5088 unsigned long btrfs_force_ra(struct address_space *mapping,
5089 struct file_ra_state *ra, struct file *file,
5090 pgoff_t offset, pgoff_t last_index)
5092 pgoff_t req_size = last_index - offset + 1;
5094 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
5095 return offset + req_size;
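/*
 * allocate an in-memory btrfs inode from the slab cache and initialize
 * the btrfs specific fields before returning the embedded VFS inode.
 */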
5098 struct inode *btrfs_alloc_inode(struct super_block *sb)
5100 struct btrfs_inode *ei;
5102 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
5106 ei->last_sub_trans = 0;
5107 ei->logged_trans = 0;
5108 ei->outstanding_extents = 0;
5109 ei->reserved_extents = 0;
5110 spin_lock_init(&ei->accounting_lock);
5111 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
5112 INIT_LIST_HEAD(&ei->i_orphan);
5113 INIT_LIST_HEAD(&ei->ordered_operations);
5114 return &ei->vfs_inode;
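/*
 * tear down an inode that is being evicted: warn about state that should
 * already be gone, drop any leftover orphan entries and ordered extents,
 * and return the memory to the slab cache.
 */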
5117 void btrfs_destroy_inode(struct inode *inode)
5119 struct btrfs_ordered_extent *ordered;
5120 struct btrfs_root *root = BTRFS_I(inode)->root;
5122 WARN_ON(!list_empty(&inode->i_dentry));
5123 WARN_ON(inode->i_data.nrpages);
5126 * Make sure we're properly removed from the ordered operation lists.
5130 if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
5131 spin_lock(&root->fs_info->ordered_extent_lock);
5132 list_del_init(&BTRFS_I(inode)->ordered_operations);
5133 spin_unlock(&root->fs_info->ordered_extent_lock);
5136 spin_lock(&root->list_lock);
5137 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
5138 printk(KERN_ERR "BTRFS: inode %lu still on the orphan"
5139 " list\n", inode->i_ino);
5142 spin_unlock(&root->list_lock);
5145 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
5149 printk(KERN_ERR "btrfs found ordered "
5150 "extent %llu %llu on inode cleanup\n",
5151 (unsigned long long)ordered->file_offset,
5152 (unsigned long long)ordered->len);
5153 btrfs_remove_ordered_extent(inode, ordered);
5154 btrfs_put_ordered_extent(ordered);
5155 btrfs_put_ordered_extent(ordered);
5158 inode_tree_del(inode);
5159 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
5160 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
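/*
 * if the root this inode belongs to has no remaining references, delete
 * the inode immediately instead of leaving it cached.
 */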
5163 void btrfs_drop_inode(struct inode *inode)
5165 struct btrfs_root *root = BTRFS_I(inode)->root;
5167 if (inode->i_nlink > 0 && btrfs_root_refs(&root->root_item) == 0)
5168 generic_delete_inode(inode);
5170 generic_drop_inode(inode);
5173 static void init_once(void *foo)
5175 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
5177 inode_init_once(&ei->vfs_inode);
5180 void btrfs_destroy_cachep(void)
5182 if (btrfs_inode_cachep)
5183 kmem_cache_destroy(btrfs_inode_cachep);
5184 if (btrfs_trans_handle_cachep)
5185 kmem_cache_destroy(btrfs_trans_handle_cachep);
5186 if (btrfs_transaction_cachep)
5187 kmem_cache_destroy(btrfs_transaction_cachep);
5188 if (btrfs_path_cachep)
5189 kmem_cache_destroy(btrfs_path_cachep);
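/*
 * create the slab caches for inodes, transaction handles, transactions
 * and paths; on any failure, destroy whatever was already created.
 */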
5192 int btrfs_init_cachep(void)
5194 btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
5195 sizeof(struct btrfs_inode), 0,
5196 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
5197 if (!btrfs_inode_cachep)
5200 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
5201 sizeof(struct btrfs_trans_handle), 0,
5202 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5203 if (!btrfs_trans_handle_cachep)
5206 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
5207 sizeof(struct btrfs_transaction), 0,
5208 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5209 if (!btrfs_transaction_cachep)
5212 btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
5213 sizeof(struct btrfs_path), 0,
5214 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5215 if (!btrfs_path_cachep)
5220 btrfs_destroy_cachep();
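/*
 * getattr: fill in the generic attributes, then report the per-subvolume
 * anonymous device, a page-sized blksize, and a block count that also
 * includes bytes still queued as delalloc.
 */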
5224 static int btrfs_getattr(struct vfsmount *mnt,
5225 struct dentry *dentry, struct kstat *stat)
5227 struct inode *inode = dentry->d_inode;
5228 generic_fillattr(inode, stat);
5229 stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
5230 stat->blksize = PAGE_CACHE_SIZE;
5231 stat->blocks = (inode_get_bytes(inode) +
5232 BTRFS_I(inode)->delalloc_bytes) >> 9;
5236 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5237 struct inode *new_dir, struct dentry *new_dentry)
5239 struct btrfs_trans_handle *trans;
5240 struct btrfs_root *root = BTRFS_I(old_dir)->root;
5241 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
5242 struct inode *new_inode = new_dentry->d_inode;
5243 struct inode *old_inode = old_dentry->d_inode;
5244 struct timespec ctime = CURRENT_TIME;
5249 if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5252 /* we only allow rename subvolume link between subvolumes */
5253 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
5256 if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
5257 (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
5260 if (S_ISDIR(old_inode->i_mode) && new_inode &&
5261 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
5265 * 2 items for dir items
5266 * 1 item for orphan entry
5269 ret = btrfs_reserve_metadata_space(root, 4);
5274 * we're using rename to replace one file with another,
5275 * and the replacement file is large. Start IO on it now so
5276 * we don't add too much work to the end of the transaction
5278 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
5279 old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
5280 filemap_flush(old_inode->i_mapping);
5282 /* close the racy window with snapshot create/destroy ioctl */
5283 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5284 down_read(&root->fs_info->subvol_sem);
5286 trans = btrfs_start_transaction(root, 1);
5287 btrfs_set_trans_block_group(trans, new_dir);
5290 btrfs_record_root_in_trans(trans, dest);
5292 ret = btrfs_set_inode_index(new_dir, &index);
5296 if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5297 /* force full log commit if subvolume involved. */
5298 root->fs_info->last_trans_log_full_commit = trans->transid;
5300 ret = btrfs_insert_inode_ref(trans, dest,
5301 new_dentry->d_name.name,
5302 new_dentry->d_name.len,
5304 new_dir->i_ino, index);
5308 * this is an ugly little race, but the rename is required
5309 * to make sure that if we crash, the inode is either at the
5310 * old name or the new one. pinning the log transaction lets
5311 * us make sure we don't allow a log commit to come in after
5312 * we unlink the name but before we add the new name back in.
5314 btrfs_pin_log_trans(root);
5317 * make sure the inode gets flushed if it is replacing something.
5320 if (new_inode && new_inode->i_size &&
5321 old_inode && S_ISREG(old_inode->i_mode)) {
5322 btrfs_add_ordered_operation(trans, root, old_inode);
5325 old_dir->i_ctime = old_dir->i_mtime = ctime;
5326 new_dir->i_ctime = new_dir->i_mtime = ctime;
5327 old_inode->i_ctime = ctime;
5329 if (old_dentry->d_parent != new_dentry->d_parent)
5330 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
5332 if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5333 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
5334 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
5335 old_dentry->d_name.name,
5336 old_dentry->d_name.len);
5338 btrfs_inc_nlink(old_dentry->d_inode);
5339 ret = btrfs_unlink_inode(trans, root, old_dir,
5340 old_dentry->d_inode,
5341 old_dentry->d_name.name,
5342 old_dentry->d_name.len);
5347 new_inode->i_ctime = CURRENT_TIME;
5348 if (unlikely(new_inode->i_ino ==
5349 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
5350 root_objectid = BTRFS_I(new_inode)->location.objectid;
5351 ret = btrfs_unlink_subvol(trans, dest, new_dir,
5353 new_dentry->d_name.name,
5354 new_dentry->d_name.len);
5355 BUG_ON(new_inode->i_nlink == 0);
5357 ret = btrfs_unlink_inode(trans, dest, new_dir,
5358 new_dentry->d_inode,
5359 new_dentry->d_name.name,
5360 new_dentry->d_name.len);
5363 if (new_inode->i_nlink == 0) {
5364 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
5369 ret = btrfs_add_link(trans, new_dir, old_inode,
5370 new_dentry->d_name.name,
5371 new_dentry->d_name.len, 0, index);
5374 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
5375 btrfs_log_new_name(trans, old_inode, old_dir,
5376 new_dentry->d_parent);
5377 btrfs_end_log_trans(root);
5380 btrfs_end_transaction_throttle(trans, root);
5382 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5383 up_read(&root->fs_info->subvol_sem);
5385 btrfs_unreserve_metadata_space(root, 4);
5390 * some fairly slow code that needs optimization. This walks the list
5391 * of all the inodes with pending delalloc and forces them to disk.
5393 int btrfs_start_delalloc_inodes(struct btrfs_root *root)
5395 struct list_head *head = &root->fs_info->delalloc_inodes;
5396 struct btrfs_inode *binode;
5397 struct inode *inode;
5399 if (root->fs_info->sb->s_flags & MS_RDONLY)
5402 spin_lock(&root->fs_info->delalloc_lock);
5403 while (!list_empty(head)) {
5404 binode = list_entry(head->next, struct btrfs_inode,
5406 inode = igrab(&binode->vfs_inode);
5408 list_del_init(&binode->delalloc_inodes);
5409 spin_unlock(&root->fs_info->delalloc_lock);
5411 filemap_flush(inode->i_mapping);
5415 spin_lock(&root->fs_info->delalloc_lock);
5417 spin_unlock(&root->fs_info->delalloc_lock);
5419 /* the filemap_flush will queue IO into the worker threads, but
5420 * we have to make sure the IO is actually started and that
5421 * ordered extents get created before we return
5423 atomic_inc(&root->fs_info->async_submit_draining);
5424 while (atomic_read(&root->fs_info->nr_async_submits) ||
5425 atomic_read(&root->fs_info->async_delalloc_pages)) {
5426 wait_event(root->fs_info->async_submit_wait,
5427 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
5428 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
5430 atomic_dec(&root->fs_info->async_submit_draining);
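/*
 * create a symlink: allocate a new inode, add the directory entry, and
 * store the target path as an inline file extent item in the btree.
 */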
5434 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
5435 const char *symname)
5437 struct btrfs_trans_handle *trans;
5438 struct btrfs_root *root = BTRFS_I(dir)->root;
5439 struct btrfs_path *path;
5440 struct btrfs_key key;
5441 struct inode *inode = NULL;
5449 struct btrfs_file_extent_item *ei;
5450 struct extent_buffer *leaf;
5451 unsigned long nr = 0;
5453 name_len = strlen(symname) + 1;
5454 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
5455 return -ENAMETOOLONG;
5458 * 2 items for inode item and ref
5459 * 2 items for dir items
5460 * 1 item for xattr if selinux is on
5462 err = btrfs_reserve_metadata_space(root, 5);
5466 trans = btrfs_start_transaction(root, 1);
5469 btrfs_set_trans_block_group(trans, dir);
5471 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
5477 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5479 dentry->d_parent->d_inode->i_ino, objectid,
5480 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
5482 err = PTR_ERR(inode);
5486 err = btrfs_init_inode_security(inode, dir);
5492 btrfs_set_trans_block_group(trans, inode);
5493 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
5497 inode->i_mapping->a_ops = &btrfs_aops;
5498 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5499 inode->i_fop = &btrfs_file_operations;
5500 inode->i_op = &btrfs_file_inode_operations;
5501 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
5503 btrfs_update_inode_block_group(trans, inode);
5504 btrfs_update_inode_block_group(trans, dir);
5508 path = btrfs_alloc_path();
5510 key.objectid = inode->i_ino;
5512 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
5513 datasize = btrfs_file_extent_calc_inline_size(name_len);
5514 err = btrfs_insert_empty_item(trans, root, path, &key,
5520 leaf = path->nodes[0];
5521 ei = btrfs_item_ptr(leaf, path->slots[0],
5522 struct btrfs_file_extent_item);
5523 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
5524 btrfs_set_file_extent_type(leaf, ei,
5525 BTRFS_FILE_EXTENT_INLINE);
5526 btrfs_set_file_extent_encryption(leaf, ei, 0);
5527 btrfs_set_file_extent_compression(leaf, ei, 0);
5528 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
5529 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
5531 ptr = btrfs_file_extent_inline_start(ei);
5532 write_extent_buffer(leaf, symname, ptr, name_len);
5533 btrfs_mark_buffer_dirty(leaf);
5534 btrfs_free_path(path);
5536 inode->i_op = &btrfs_symlink_inode_operations;
5537 inode->i_mapping->a_ops = &btrfs_symlink_aops;
5538 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5539 inode_set_bytes(inode, name_len);
5540 btrfs_i_size_write(inode, name_len - 1);
5541 err = btrfs_update_inode(trans, root, inode);
5546 nr = trans->blocks_used;
5547 btrfs_end_transaction_throttle(trans, root);
5549 btrfs_unreserve_metadata_space(root, 5);
5551 inode_dec_link_count(inode);
5554 btrfs_btree_balance_dirty(root, nr);
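/*
 * reserve and insert preallocated extents covering [start, end), in
 * chunks no larger than the max extent size, updating i_size when the
 * allocation extends the file and FALLOC_FL_KEEP_SIZE was not requested.
 */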
5558 static int prealloc_file_range(struct btrfs_trans_handle *trans,
5559 struct inode *inode, u64 start, u64 end,
5560 u64 locked_end, u64 alloc_hint, int mode)
5562 struct btrfs_root *root = BTRFS_I(inode)->root;
5563 struct btrfs_key ins;
5565 u64 cur_offset = start;
5566 u64 num_bytes = end - start;
5569 while (num_bytes > 0) {
5570 alloc_size = min(num_bytes, root->fs_info->max_extent);
5572 ret = btrfs_reserve_metadata_space(root, 1);
5576 ret = btrfs_reserve_extent(trans, root, alloc_size,
5577 root->sectorsize, 0, alloc_hint,
5583 ret = insert_reserved_file_extent(trans, inode,
5584 cur_offset, ins.objectid,
5585 ins.offset, ins.offset,
5586 ins.offset, locked_end,
5588 BTRFS_FILE_EXTENT_PREALLOC);
5590 btrfs_drop_extent_cache(inode, cur_offset,
5591 cur_offset + ins.offset - 1, 0);
5592 num_bytes -= ins.offset;
5593 cur_offset += ins.offset;
5594 alloc_hint = ins.objectid + ins.offset;
5595 btrfs_unreserve_metadata_space(root, 1);
5598 if (cur_offset > start) {
5599 inode->i_ctime = CURRENT_TIME;
5600 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
5601 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
5602 cur_offset > i_size_read(inode))
5603 btrfs_i_size_write(inode, cur_offset);
5604 ret = btrfs_update_inode(trans, root, inode);
5611 static long btrfs_fallocate(struct inode *inode, int mode,
5612 loff_t offset, loff_t len)
5620 u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
5621 struct extent_map *em;
5622 struct btrfs_trans_handle *trans;
5623 struct btrfs_root *root;
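/*
 * align the requested range to sector boundaries: round the start down
 * and the end up.  For example, with a 4K sectorsize, offset 5000 and
 * len 3000 become the range [4096, 8192).
 */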
5626 alloc_start = offset & ~mask;
5627 alloc_end = (offset + len + mask) & ~mask;
5630 * wait for ordered IO before we have any locks. We'll loop again
5631 * below with the locks held.
5633 btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
5635 mutex_lock(&inode->i_mutex);
5636 if (alloc_start > inode->i_size) {
5637 ret = btrfs_cont_expand(inode, alloc_start);
5642 root = BTRFS_I(inode)->root;
5644 ret = btrfs_check_data_free_space(root, inode,
5645 alloc_end - alloc_start);
5649 locked_end = alloc_end - 1;
5651 struct btrfs_ordered_extent *ordered;
5653 trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
5659 /* the extent lock is ordered inside the running transaction */
5662 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5664 ordered = btrfs_lookup_first_ordered_extent(inode,
5667 ordered->file_offset + ordered->len > alloc_start &&
5668 ordered->file_offset < alloc_end) {
5669 btrfs_put_ordered_extent(ordered);
5670 unlock_extent(&BTRFS_I(inode)->io_tree,
5671 alloc_start, locked_end, GFP_NOFS);
5672 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
5675 * we can't wait on the range with the transaction
5676 * running or with the extent lock held
5678 btrfs_wait_ordered_range(inode, alloc_start,
5679 alloc_end - alloc_start);
5682 btrfs_put_ordered_extent(ordered);
5687 cur_offset = alloc_start;
5689 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
5690 alloc_end - cur_offset, 0);
5691 BUG_ON(IS_ERR(em) || !em);
5692 last_byte = min(extent_map_end(em), alloc_end);
5693 last_byte = (last_byte + mask) & ~mask;
5694 if (em->block_start == EXTENT_MAP_HOLE) {
5695 ret = prealloc_file_range(trans, inode, cur_offset,
5696 last_byte, locked_end + 1,
5699 free_extent_map(em);
5703 if (em->block_start <= EXTENT_MAP_LAST_BYTE)
5704 alloc_hint = em->block_start;
5705 free_extent_map(em);
5707 cur_offset = last_byte;
5708 if (cur_offset >= alloc_end) {
5713 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5716 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
5718 btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start);
5720 mutex_unlock(&inode->i_mutex);
5724 static int btrfs_set_page_dirty(struct page *page)
5726 return __set_page_dirty_nobuffers(page);
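/*
 * deny write access to inodes flagged read-only, then fall back to the
 * generic permission checks (which consult btrfs_check_acl for ACLs).
 */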
5729 static int btrfs_permission(struct inode *inode, int mask)
5731 if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
5733 return generic_permission(inode, mask, btrfs_check_acl);
5736 static struct inode_operations btrfs_dir_inode_operations = {
5737 .getattr = btrfs_getattr,
5738 .lookup = btrfs_lookup,
5739 .create = btrfs_create,
5740 .unlink = btrfs_unlink,
5742 .mkdir = btrfs_mkdir,
5743 .rmdir = btrfs_rmdir,
5744 .rename = btrfs_rename,
5745 .symlink = btrfs_symlink,
5746 .setattr = btrfs_setattr,
5747 .mknod = btrfs_mknod,
5748 .setxattr = btrfs_setxattr,
5749 .getxattr = btrfs_getxattr,
5750 .listxattr = btrfs_listxattr,
5751 .removexattr = btrfs_removexattr,
5752 .permission = btrfs_permission,
5754 static struct inode_operations btrfs_dir_ro_inode_operations = {
5755 .lookup = btrfs_lookup,
5756 .permission = btrfs_permission,
5759 static struct file_operations btrfs_dir_file_operations = {
5760 .llseek = generic_file_llseek,
5761 .read = generic_read_dir,
5762 .readdir = btrfs_real_readdir,
5763 .unlocked_ioctl = btrfs_ioctl,
5764 #ifdef CONFIG_COMPAT
5765 .compat_ioctl = btrfs_ioctl,
5767 .release = btrfs_release_file,
5768 .fsync = btrfs_sync_file,
5771 static struct extent_io_ops btrfs_extent_io_ops = {
5772 .fill_delalloc = run_delalloc_range,
5773 .submit_bio_hook = btrfs_submit_bio_hook,
5774 .merge_bio_hook = btrfs_merge_bio_hook,
5775 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
5776 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
5777 .writepage_start_hook = btrfs_writepage_start_hook,
5778 .readpage_io_failed_hook = btrfs_io_failed_hook,
5779 .set_bit_hook = btrfs_set_bit_hook,
5780 .clear_bit_hook = btrfs_clear_bit_hook,
5781 .merge_extent_hook = btrfs_merge_extent_hook,
5782 .split_extent_hook = btrfs_split_extent_hook,
5786 * btrfs doesn't support the bmap operation because swapfiles
5787 * use bmap to make a mapping of extents in the file. They assume
5788 * these extents won't change over the life of the file and they
5789 * use the bmap result to do IO directly to the drive.
5791 * the btrfs bmap call would return logical addresses that aren't
5792 * suitable for IO and they also will change frequently as COW
5793 * operations happen. So, swapfile + btrfs == corruption.
5795 * For now we're avoiding this by dropping bmap.
5797 static struct address_space_operations btrfs_aops = {
5798 .readpage = btrfs_readpage,
5799 .writepage = btrfs_writepage,
5800 .writepages = btrfs_writepages,
5801 .readpages = btrfs_readpages,
5802 .sync_page = block_sync_page,
5803 .direct_IO = btrfs_direct_IO,
5804 .invalidatepage = btrfs_invalidatepage,
5805 .releasepage = btrfs_releasepage,
5806 .set_page_dirty = btrfs_set_page_dirty,
5809 static struct address_space_operations btrfs_symlink_aops = {
5810 .readpage = btrfs_readpage,
5811 .writepage = btrfs_writepage,
5812 .invalidatepage = btrfs_invalidatepage,
5813 .releasepage = btrfs_releasepage,
5816 static struct inode_operations btrfs_file_inode_operations = {
5817 .truncate = btrfs_truncate,
5818 .getattr = btrfs_getattr,
5819 .setattr = btrfs_setattr,
5820 .setxattr = btrfs_setxattr,
5821 .getxattr = btrfs_getxattr,
5822 .listxattr = btrfs_listxattr,
5823 .removexattr = btrfs_removexattr,
5824 .permission = btrfs_permission,
5825 .fallocate = btrfs_fallocate,
5826 .fiemap = btrfs_fiemap,
5828 static struct inode_operations btrfs_special_inode_operations = {
5829 .getattr = btrfs_getattr,
5830 .setattr = btrfs_setattr,
5831 .permission = btrfs_permission,
5832 .setxattr = btrfs_setxattr,
5833 .getxattr = btrfs_getxattr,
5834 .listxattr = btrfs_listxattr,
5835 .removexattr = btrfs_removexattr,
5837 static struct inode_operations btrfs_symlink_inode_operations = {
5838 .readlink = generic_readlink,
5839 .follow_link = page_follow_link_light,
5840 .put_link = page_put_link,
5841 .permission = btrfs_permission,
5842 .setxattr = btrfs_setxattr,
5843 .getxattr = btrfs_getxattr,
5844 .listxattr = btrfs_listxattr,
5845 .removexattr = btrfs_removexattr,
5848 const struct dentry_operations btrfs_dentry_operations = {
5849 .d_delete = btrfs_dentry_delete,