2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mpage.h>
31 #include <linux/swap.h>
32 #include <linux/writeback.h>
33 #include <linux/statfs.h>
34 #include <linux/compat.h>
35 #include <linux/bit_spinlock.h>
36 #include <linux/xattr.h>
37 #include <linux/posix_acl.h>
38 #include <linux/falloc.h>
42 #include "transaction.h"
43 #include "btrfs_inode.h"
45 #include "print-tree.h"
47 #include "ordered-data.h"
50 #include "compression.h"
53 struct btrfs_iget_args {
55 struct btrfs_root *root;
58 static struct inode_operations btrfs_dir_inode_operations;
59 static struct inode_operations btrfs_symlink_inode_operations;
60 static struct inode_operations btrfs_dir_ro_inode_operations;
61 static struct inode_operations btrfs_special_inode_operations;
62 static struct inode_operations btrfs_file_inode_operations;
63 static struct address_space_operations btrfs_aops;
64 static struct address_space_operations btrfs_symlink_aops;
65 static struct file_operations btrfs_dir_file_operations;
66 static struct extent_io_ops btrfs_extent_io_ops;
68 static struct kmem_cache *btrfs_inode_cachep;
69 struct kmem_cache *btrfs_trans_handle_cachep;
70 struct kmem_cache *btrfs_transaction_cachep;
71 struct kmem_cache *btrfs_path_cachep;
74 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
75 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
76 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
77 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
78 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
79 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
80 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
81 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
84 static void btrfs_truncate(struct inode *inode);
85 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
86 static noinline int cow_file_range(struct inode *inode,
87 struct page *locked_page,
88 u64 start, u64 end, int *page_started,
89 unsigned long *nr_written, int unlock);
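/*
 * Illustrative sketch, not part of the original file (helper name made up):
 * the btrfs_type_by_mode table above maps the S_IFMT bits of an inode mode
 * to the directory entry type stored on disk.  A lookup is just an index by
 * the mode bits shifted down by S_SHIFT, roughly:
 */
static inline u8 example_dir_type_from_mode(umode_t mode)
{
	return btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}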
91 static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
95 err = btrfs_init_acl(inode, dir);
97 err = btrfs_xattr_security_init(inode, dir);
102 * this does all the hard work for inserting an inline extent into
103 * the btree. The caller should have done a btrfs_drop_extents so that
104 * no overlapping inline items exist in the btree
106 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
107 struct btrfs_root *root, struct inode *inode,
108 u64 start, size_t size, size_t compressed_size,
109 struct page **compressed_pages)
111 struct btrfs_key key;
112 struct btrfs_path *path;
113 struct extent_buffer *leaf;
114 struct page *page = NULL;
117 struct btrfs_file_extent_item *ei;
120 size_t cur_size = size;
122 unsigned long offset;
123 int use_compress = 0;
125 if (compressed_size && compressed_pages) {
127 cur_size = compressed_size;
130 path = btrfs_alloc_path();
134 path->leave_spinning = 1;
135 btrfs_set_trans_block_group(trans, inode);
137 key.objectid = inode->i_ino;
139 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
140 datasize = btrfs_file_extent_calc_inline_size(cur_size);
142 inode_add_bytes(inode, size);
143 ret = btrfs_insert_empty_item(trans, root, path, &key,
150 leaf = path->nodes[0];
151 ei = btrfs_item_ptr(leaf, path->slots[0],
152 struct btrfs_file_extent_item);
153 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
154 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
155 btrfs_set_file_extent_encryption(leaf, ei, 0);
156 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
157 btrfs_set_file_extent_ram_bytes(leaf, ei, size);
158 ptr = btrfs_file_extent_inline_start(ei);
163 while (compressed_size > 0) {
164 cpage = compressed_pages[i];
165 cur_size = min_t(unsigned long, compressed_size,
168 kaddr = kmap_atomic(cpage, KM_USER0);
169 write_extent_buffer(leaf, kaddr, ptr, cur_size);
170 kunmap_atomic(kaddr, KM_USER0);
174 compressed_size -= cur_size;
176 btrfs_set_file_extent_compression(leaf, ei,
177 BTRFS_COMPRESS_ZLIB);
179 page = find_get_page(inode->i_mapping,
180 start >> PAGE_CACHE_SHIFT);
181 btrfs_set_file_extent_compression(leaf, ei, 0);
182 kaddr = kmap_atomic(page, KM_USER0);
183 offset = start & (PAGE_CACHE_SIZE - 1);
184 write_extent_buffer(leaf, kaddr + offset, ptr, size);
185 kunmap_atomic(kaddr, KM_USER0);
186 page_cache_release(page);
188 btrfs_mark_buffer_dirty(leaf);
189 btrfs_free_path(path);
191 BTRFS_I(inode)->disk_i_size = inode->i_size;
192 btrfs_update_inode(trans, root, inode);
195 btrfs_free_path(path);
201 * conditionally insert an inline extent into the file. This
202 * does the checks required to make sure the data is small enough
203 * to fit as an inline extent.
205 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
206 struct btrfs_root *root,
207 struct inode *inode, u64 start, u64 end,
208 size_t compressed_size,
209 struct page **compressed_pages)
211 u64 isize = i_size_read(inode);
212 u64 actual_end = min(end + 1, isize);
213 u64 inline_len = actual_end - start;
214 u64 aligned_end = (end + root->sectorsize - 1) &
215 ~((u64)root->sectorsize - 1);
217 u64 data_len = inline_len;
221 data_len = compressed_size;
224 actual_end >= PAGE_CACHE_SIZE ||
225 data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
227 (actual_end & (root->sectorsize - 1)) == 0) ||
229 data_len > root->fs_info->max_inline) {
233 ret = btrfs_drop_extents(trans, root, inode, start,
234 aligned_end, aligned_end, start,
238 if (isize > actual_end)
239 inline_len = min_t(u64, isize, actual_end);
240 ret = insert_inline_extent(trans, root, inode, start,
241 inline_len, compressed_size,
244 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
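/*
 * Illustrative sketch, not part of the original file (helper name made up):
 * aligned_end above rounds end up to a sectorsize multiple, assuming
 * sectorsize is a power of two.  With a 4096 byte sector and end == 6000,
 * the result is 8192.
 */
static inline u64 example_round_up_to_sector(u64 bytes, u64 sectorsize)
{
	return (bytes + sectorsize - 1) & ~(sectorsize - 1);
}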
248 struct async_extent {
253 unsigned long nr_pages;
254 struct list_head list;
259 struct btrfs_root *root;
260 struct page *locked_page;
263 struct list_head extents;
264 struct btrfs_work work;
267 static noinline int add_async_extent(struct async_cow *cow,
268 u64 start, u64 ram_size,
271 unsigned long nr_pages)
273 struct async_extent *async_extent;
275 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
276 async_extent->start = start;
277 async_extent->ram_size = ram_size;
278 async_extent->compressed_size = compressed_size;
279 async_extent->pages = pages;
280 async_extent->nr_pages = nr_pages;
281 list_add_tail(&async_extent->list, &cow->extents);
286 * we create compressed extents in two phases. The first
287 * phase compresses a range of pages that have already been
288 * locked (both pages and state bits are locked).
290 * This is done inside an ordered work queue, and the compression
291 * is spread across many cpus. The actual IO submission is step
292 * two, and the ordered work queue takes care of making sure that
293 * happens in the same order things were put onto the queue by
294 * writepages and friends.
296 * If this code finds it can't get good compression, it puts an
297 * entry onto the work queue to write the uncompressed bytes. This
298 * makes sure that both compressed inodes and uncompressed inodes
299 * are written in the same order that pdflush sent them down.
301 static noinline int compress_file_range(struct inode *inode,
302 struct page *locked_page,
304 struct async_cow *async_cow,
307 struct btrfs_root *root = BTRFS_I(inode)->root;
308 struct btrfs_trans_handle *trans;
312 u64 blocksize = root->sectorsize;
314 u64 isize = i_size_read(inode);
316 struct page **pages = NULL;
317 unsigned long nr_pages;
318 unsigned long nr_pages_ret = 0;
319 unsigned long total_compressed = 0;
320 unsigned long total_in = 0;
321 unsigned long max_compressed = 128 * 1024;
322 unsigned long max_uncompressed = 128 * 1024;
328 actual_end = min_t(u64, isize, end + 1);
331 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
332 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
335 * we don't want to send crud past the end of i_size through
336 * compression, that's just a waste of CPU time. So, if the
337 * end of the file is before the start of our current
338 * requested range of bytes, we bail out to the uncompressed
339 * cleanup code that can deal with all of this.
341 * It isn't really the fastest way to fix things, but this is a
342 * very uncommon corner.
344 if (actual_end <= start)
345 goto cleanup_and_bail_uncompressed;
347 total_compressed = actual_end - start;
349 /* we want to make sure that the amount of ram required to uncompress
350 * an extent is reasonable, so we limit the total size in ram
351 * of a compressed extent to 128k. This is a crucial number
352 * because it also controls how easily we can spread reads across
353 * cpus for decompression.
355 * We also want to make sure the amount of IO required to do
356 * a random read is reasonably small, so we limit the size of
357 * a compressed extent to 128k.
359 total_compressed = min(total_compressed, max_uncompressed);
360 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
361 num_bytes = max(blocksize, num_bytes);
362 disk_num_bytes = num_bytes;
367 * we do compression for mount -o compress and when the
368 * inode has not been flagged as nocompress. This flag can
369 * change at any time if we discover bad compression ratios.
371 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
372 btrfs_test_opt(root, COMPRESS)) {
374 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
376 ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
377 total_compressed, pages,
378 nr_pages, &nr_pages_ret,
384 unsigned long offset = total_compressed &
385 (PAGE_CACHE_SIZE - 1);
386 struct page *page = pages[nr_pages_ret - 1];
389 /* zero the tail end of the last page, we might be
390 * sending it down to disk
393 kaddr = kmap_atomic(page, KM_USER0);
394 memset(kaddr + offset, 0,
395 PAGE_CACHE_SIZE - offset);
396 kunmap_atomic(kaddr, KM_USER0);
402 trans = btrfs_join_transaction(root, 1);
404 btrfs_set_trans_block_group(trans, inode);
406 /* let's try to make an inline extent */
407 if (ret || total_in < (actual_end - start)) {
408 /* we didn't compress the entire range, try
409 * to make an uncompressed inline extent.
411 ret = cow_file_range_inline(trans, root, inode,
412 start, end, 0, NULL);
414 /* try making a compressed inline extent */
415 ret = cow_file_range_inline(trans, root, inode,
417 total_compressed, pages);
419 btrfs_end_transaction(trans, root);
422 * inline extent creation worked, we don't need
423 * to create any more async work items. Unlock
424 * and free up our temp pages.
426 extent_clear_unlock_delalloc(inode,
427 &BTRFS_I(inode)->io_tree,
429 EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
430 EXTENT_CLEAR_DELALLOC |
431 EXTENT_CLEAR_ACCOUNTING |
432 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
440 * we aren't doing an inline extent, so round the compressed size
441 * up to a block size boundary so the allocator does sane
444 total_compressed = (total_compressed + blocksize - 1) &
448 * one last check to make sure the compression is really a
449 * win, compare the page count read with the blocks on disk
451 total_in = (total_in + PAGE_CACHE_SIZE - 1) &
452 ~(PAGE_CACHE_SIZE - 1);
453 if (total_compressed >= total_in) {
456 disk_num_bytes = total_compressed;
457 num_bytes = total_in;
460 if (!will_compress && pages) {
462 * the compression code ran but failed to make things smaller,
463 * free any pages it allocated and our page pointer array
465 for (i = 0; i < nr_pages_ret; i++) {
466 WARN_ON(pages[i]->mapping);
467 page_cache_release(pages[i]);
471 total_compressed = 0;
474 /* flag the file so we don't compress in the future */
475 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
480 /* the async work queues will take care of doing actual
481 * allocation on disk for these compressed pages,
482 * and will submit them to the elevator.
484 add_async_extent(async_cow, start, num_bytes,
485 total_compressed, pages, nr_pages_ret);
487 if (start + num_bytes < end && start + num_bytes < actual_end) {
494 cleanup_and_bail_uncompressed:
496 * No compression, but we still need to write the pages in
497 * the file we've been given so far. Redirty the locked
498 * page if it corresponds to our extent and set things up
499 * for the async work queue to run cow_file_range to do
500 * the normal delalloc dance
502 if (page_offset(locked_page) >= start &&
503 page_offset(locked_page) <= end) {
504 __set_page_dirty_nobuffers(locked_page);
505 /* unlocked later on in the async handlers */
507 add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
515 for (i = 0; i < nr_pages_ret; i++) {
516 WARN_ON(pages[i]->mapping);
517 page_cache_release(pages[i]);
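/*
 * Illustrative sketch, not part of the original file (helper name made up):
 * the "is compression a win" test above rounds the compressed size up to a
 * block and the uncompressed size up to a page before comparing, so a range
 * only counts as compressed if it is still smaller after that rounding.
 */
static inline int example_compression_wins(unsigned long total_compressed,
					   unsigned long total_in,
					   unsigned long blocksize)
{
	total_compressed = (total_compressed + blocksize - 1) & ~(blocksize - 1);
	total_in = (total_in + PAGE_CACHE_SIZE - 1) & ~(PAGE_CACHE_SIZE - 1);
	return total_compressed < total_in;
}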
525 * phase two of compressed writeback. This is the ordered portion
526 * of the code, which only gets called in the order the work was
527 * queued. We walk all the async extents created by compress_file_range
528 * and send them down to the disk.
530 static noinline int submit_compressed_extents(struct inode *inode,
531 struct async_cow *async_cow)
533 struct async_extent *async_extent;
535 struct btrfs_trans_handle *trans;
536 struct btrfs_key ins;
537 struct extent_map *em;
538 struct btrfs_root *root = BTRFS_I(inode)->root;
539 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
540 struct extent_io_tree *io_tree;
543 if (list_empty(&async_cow->extents))
546 trans = btrfs_join_transaction(root, 1);
548 while (!list_empty(&async_cow->extents)) {
549 async_extent = list_entry(async_cow->extents.next,
550 struct async_extent, list);
551 list_del(&async_extent->list);
553 io_tree = &BTRFS_I(inode)->io_tree;
555 /* did the compression code fall back to uncompressed IO? */
556 if (!async_extent->pages) {
557 int page_started = 0;
558 unsigned long nr_written = 0;
560 lock_extent(io_tree, async_extent->start,
561 async_extent->start +
562 async_extent->ram_size - 1, GFP_NOFS);
564 /* allocate blocks */
565 cow_file_range(inode, async_cow->locked_page,
567 async_extent->start +
568 async_extent->ram_size - 1,
569 &page_started, &nr_written, 0);
572 * if page_started, cow_file_range inserted an
573 * inline extent and took care of all the unlocking
574 * and IO for us. Otherwise, we need to submit
575 * all those pages down to the drive.
578 extent_write_locked_range(io_tree,
579 inode, async_extent->start,
580 async_extent->start +
581 async_extent->ram_size - 1,
589 lock_extent(io_tree, async_extent->start,
590 async_extent->start + async_extent->ram_size - 1,
593 * here we're doing allocation and writeback of the
596 btrfs_drop_extent_cache(inode, async_extent->start,
597 async_extent->start +
598 async_extent->ram_size - 1, 0);
600 ret = btrfs_reserve_extent(trans, root,
601 async_extent->compressed_size,
602 async_extent->compressed_size,
606 em = alloc_extent_map(GFP_NOFS);
607 em->start = async_extent->start;
608 em->len = async_extent->ram_size;
609 em->orig_start = em->start;
611 em->block_start = ins.objectid;
612 em->block_len = ins.offset;
613 em->bdev = root->fs_info->fs_devices->latest_bdev;
614 set_bit(EXTENT_FLAG_PINNED, &em->flags);
615 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
618 write_lock(&em_tree->lock);
619 ret = add_extent_mapping(em_tree, em);
620 write_unlock(&em_tree->lock);
621 if (ret != -EEXIST) {
625 btrfs_drop_extent_cache(inode, async_extent->start,
626 async_extent->start +
627 async_extent->ram_size - 1, 0);
630 ret = btrfs_add_ordered_extent(inode, async_extent->start,
632 async_extent->ram_size,
634 BTRFS_ORDERED_COMPRESSED);
637 btrfs_end_transaction(trans, root);
640 * clear dirty, set writeback and unlock the pages.
642 extent_clear_unlock_delalloc(inode,
643 &BTRFS_I(inode)->io_tree,
645 async_extent->start +
646 async_extent->ram_size - 1,
647 NULL, EXTENT_CLEAR_UNLOCK_PAGE |
648 EXTENT_CLEAR_UNLOCK |
649 EXTENT_CLEAR_DELALLOC |
650 EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
652 ret = btrfs_submit_compressed_write(inode,
654 async_extent->ram_size,
656 ins.offset, async_extent->pages,
657 async_extent->nr_pages);
660 trans = btrfs_join_transaction(root, 1);
661 alloc_hint = ins.objectid + ins.offset;
666 btrfs_end_transaction(trans, root);
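/*
 * Illustrative sketch, not part of the original file (helper name made up):
 * for a compressed extent the in-memory extent_map records both sizes, as
 * the loop above does: len is the uncompressed (ram) size of the file range
 * and block_len is the compressed size that was reserved on disk.
 */
static inline void example_fill_compressed_em(struct extent_map *em,
					      u64 file_start, u64 ram_size,
					      u64 disk_bytenr, u64 disk_len)
{
	em->start = file_start;
	em->orig_start = file_start;
	em->len = ram_size;
	em->block_start = disk_bytenr;
	em->block_len = disk_len;
	set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
}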
671 * when extent_io.c finds a delayed allocation range in the file,
672 * the callbacks end up in this code. The basic idea is to
673 * allocate extents on disk for the range, and create ordered data structs
674 * in ram to track those extents.
676 * locked_page is the page that writepage had locked already. We use
677 * it to make sure we don't do extra locks or unlocks.
679 * *page_started is set to one if we unlock locked_page and do everything
680 * required to start IO on it. It may be clean and already done with
683 static noinline int cow_file_range(struct inode *inode,
684 struct page *locked_page,
685 u64 start, u64 end, int *page_started,
686 unsigned long *nr_written,
689 struct btrfs_root *root = BTRFS_I(inode)->root;
690 struct btrfs_trans_handle *trans;
693 unsigned long ram_size;
696 u64 blocksize = root->sectorsize;
698 u64 isize = i_size_read(inode);
699 struct btrfs_key ins;
700 struct extent_map *em;
701 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
704 trans = btrfs_join_transaction(root, 1);
706 btrfs_set_trans_block_group(trans, inode);
708 actual_end = min_t(u64, isize, end + 1);
710 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
711 num_bytes = max(blocksize, num_bytes);
712 disk_num_bytes = num_bytes;
716 /* let's try to make an inline extent */
717 ret = cow_file_range_inline(trans, root, inode,
718 start, end, 0, NULL);
720 extent_clear_unlock_delalloc(inode,
721 &BTRFS_I(inode)->io_tree,
723 EXTENT_CLEAR_UNLOCK_PAGE |
724 EXTENT_CLEAR_UNLOCK |
725 EXTENT_CLEAR_DELALLOC |
726 EXTENT_CLEAR_ACCOUNTING |
728 EXTENT_SET_WRITEBACK |
729 EXTENT_END_WRITEBACK);
730 *nr_written = *nr_written +
731 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
738 BUG_ON(disk_num_bytes >
739 btrfs_super_total_bytes(&root->fs_info->super_copy));
742 read_lock(&BTRFS_I(inode)->extent_tree.lock);
743 em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
747 * if block start isn't an actual block number then find the
748 * first block in this inode and use that as a hint. If that
749 * block is also bogus then just don't worry about it.
751 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
753 em = search_extent_mapping(em_tree, 0, 0);
754 if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
755 alloc_hint = em->block_start;
759 alloc_hint = em->block_start;
763 read_unlock(&BTRFS_I(inode)->extent_tree.lock);
764 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
766 while (disk_num_bytes > 0) {
769 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
770 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
771 root->sectorsize, 0, alloc_hint,
775 em = alloc_extent_map(GFP_NOFS);
777 em->orig_start = em->start;
778 ram_size = ins.offset;
779 em->len = ins.offset;
781 em->block_start = ins.objectid;
782 em->block_len = ins.offset;
783 em->bdev = root->fs_info->fs_devices->latest_bdev;
784 set_bit(EXTENT_FLAG_PINNED, &em->flags);
787 write_lock(&em_tree->lock);
788 ret = add_extent_mapping(em_tree, em);
789 write_unlock(&em_tree->lock);
790 if (ret != -EEXIST) {
794 btrfs_drop_extent_cache(inode, start,
795 start + ram_size - 1, 0);
798 cur_alloc_size = ins.offset;
799 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
800 ram_size, cur_alloc_size, 0);
803 if (root->root_key.objectid ==
804 BTRFS_DATA_RELOC_TREE_OBJECTID) {
805 ret = btrfs_reloc_clone_csums(inode, start,
810 if (disk_num_bytes < cur_alloc_size)
813 /* we're not doing compressed IO, don't unlock the first
814 * page (which the caller expects to stay locked), don't
815 * clear any dirty bits and don't set any writeback bits
817 * Do set the Private2 bit so we know this page was properly
818 * set up for writepage
820 op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
821 op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
824 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
825 start, start + ram_size - 1,
827 disk_num_bytes -= cur_alloc_size;
828 num_bytes -= cur_alloc_size;
829 alloc_hint = ins.objectid + ins.offset;
830 start += cur_alloc_size;
834 btrfs_end_transaction(trans, root);
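/*
 * Illustrative sketch, not part of the original file (helper name made up):
 * nr_written above is bumped by the number of pages covered by the
 * (inclusive) byte range; e.g. bytes 0..12287 with 4K pages count as 3.
 */
static inline unsigned long example_delalloc_page_count(u64 start, u64 end)
{
	return (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
}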
840 * work queue callback to start compression on a file and its pages
842 static noinline void async_cow_start(struct btrfs_work *work)
844 struct async_cow *async_cow;
846 async_cow = container_of(work, struct async_cow, work);
848 compress_file_range(async_cow->inode, async_cow->locked_page,
849 async_cow->start, async_cow->end, async_cow,
852 async_cow->inode = NULL;
856 * work queue callback to submit previously compressed pages
858 static noinline void async_cow_submit(struct btrfs_work *work)
860 struct async_cow *async_cow;
861 struct btrfs_root *root;
862 unsigned long nr_pages;
864 async_cow = container_of(work, struct async_cow, work);
866 root = async_cow->root;
867 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
870 atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
872 if (atomic_read(&root->fs_info->async_delalloc_pages) <
874 waitqueue_active(&root->fs_info->async_submit_wait))
875 wake_up(&root->fs_info->async_submit_wait);
877 if (async_cow->inode)
878 submit_compressed_extents(async_cow->inode, async_cow);
881 static noinline void async_cow_free(struct btrfs_work *work)
883 struct async_cow *async_cow;
884 async_cow = container_of(work, struct async_cow, work);
888 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
889 u64 start, u64 end, int *page_started,
890 unsigned long *nr_written)
892 struct async_cow *async_cow;
893 struct btrfs_root *root = BTRFS_I(inode)->root;
894 unsigned long nr_pages;
896 int limit = 10 * 1024 * 1024;
898 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
899 1, 0, NULL, GFP_NOFS);
900 while (start < end) {
901 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
902 async_cow->inode = inode;
903 async_cow->root = root;
904 async_cow->locked_page = locked_page;
905 async_cow->start = start;
907 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
910 cur_end = min(end, start + 512 * 1024 - 1);
912 async_cow->end = cur_end;
913 INIT_LIST_HEAD(&async_cow->extents);
915 async_cow->work.func = async_cow_start;
916 async_cow->work.ordered_func = async_cow_submit;
917 async_cow->work.ordered_free = async_cow_free;
918 async_cow->work.flags = 0;
920 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
922 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
924 btrfs_queue_worker(&root->fs_info->delalloc_workers,
927 if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
928 wait_event(root->fs_info->async_submit_wait,
929 (atomic_read(&root->fs_info->async_delalloc_pages) <
933 while (atomic_read(&root->fs_info->async_submit_draining) &&
934 atomic_read(&root->fs_info->async_delalloc_pages)) {
935 wait_event(root->fs_info->async_submit_wait,
936 (atomic_read(&root->fs_info->async_delalloc_pages) ==
940 *nr_written += nr_pages;
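/*
 * Illustrative sketch, not part of the original file (helper name made up):
 * for compressible inodes the loop above carves the delalloc range into
 * async_cow chunks of at most 512K; e.g. a 1MB range starting at 0 becomes
 * chunks [0, 524287] and [524288, 1048575].
 */
static inline u64 example_async_cow_chunk_end(u64 start, u64 end)
{
	return min(end, start + 512 * 1024 - 1);
}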
947 static noinline int csum_exist_in_range(struct btrfs_root *root,
948 u64 bytenr, u64 num_bytes)
951 struct btrfs_ordered_sum *sums;
954 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
955 bytenr + num_bytes - 1, &list);
956 if (ret == 0 && list_empty(&list))
959 while (!list_empty(&list)) {
960 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
961 list_del(&sums->list);
968 * called back for nocow writeback. This checks for snapshots or COW copies
969 * of the extents that exist in the file, and COWs the file as required.
971 * If no cow copies or snapshots exist, we write directly to the existing
974 static noinline int run_delalloc_nocow(struct inode *inode,
975 struct page *locked_page,
976 u64 start, u64 end, int *page_started, int force,
977 unsigned long *nr_written)
979 struct btrfs_root *root = BTRFS_I(inode)->root;
980 struct btrfs_trans_handle *trans;
981 struct extent_buffer *leaf;
982 struct btrfs_path *path;
983 struct btrfs_file_extent_item *fi;
984 struct btrfs_key found_key;
997 path = btrfs_alloc_path();
999 trans = btrfs_join_transaction(root, 1);
1002 cow_start = (u64)-1;
1005 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
1008 if (ret > 0 && path->slots[0] > 0 && check_prev) {
1009 leaf = path->nodes[0];
1010 btrfs_item_key_to_cpu(leaf, &found_key,
1011 path->slots[0] - 1);
1012 if (found_key.objectid == inode->i_ino &&
1013 found_key.type == BTRFS_EXTENT_DATA_KEY)
1018 leaf = path->nodes[0];
1019 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1020 ret = btrfs_next_leaf(root, path);
1025 leaf = path->nodes[0];
1031 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1033 if (found_key.objectid > inode->i_ino ||
1034 found_key.type > BTRFS_EXTENT_DATA_KEY ||
1035 found_key.offset > end)
1038 if (found_key.offset > cur_offset) {
1039 extent_end = found_key.offset;
1044 fi = btrfs_item_ptr(leaf, path->slots[0],
1045 struct btrfs_file_extent_item);
1046 extent_type = btrfs_file_extent_type(leaf, fi);
1048 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1049 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1050 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1051 extent_offset = btrfs_file_extent_offset(leaf, fi);
1052 extent_end = found_key.offset +
1053 btrfs_file_extent_num_bytes(leaf, fi);
1054 if (extent_end <= start) {
1058 if (disk_bytenr == 0)
1060 if (btrfs_file_extent_compression(leaf, fi) ||
1061 btrfs_file_extent_encryption(leaf, fi) ||
1062 btrfs_file_extent_other_encoding(leaf, fi))
1064 if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1066 if (btrfs_extent_readonly(root, disk_bytenr))
1068 if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
1070 extent_offset, disk_bytenr))
1072 disk_bytenr += extent_offset;
1073 disk_bytenr += cur_offset - found_key.offset;
1074 num_bytes = min(end + 1, extent_end) - cur_offset;
1076 * force cow if csums exist in the range.
1077 * this ensures that the csums for a given extent are
1078 * either valid or do not exist.
1080 if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1083 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1084 extent_end = found_key.offset +
1085 btrfs_file_extent_inline_len(leaf, fi);
1086 extent_end = ALIGN(extent_end, root->sectorsize);
1091 if (extent_end <= start) {
1096 if (cow_start == (u64)-1)
1097 cow_start = cur_offset;
1098 cur_offset = extent_end;
1099 if (cur_offset > end)
1105 btrfs_release_path(root, path);
1106 if (cow_start != (u64)-1) {
1107 ret = cow_file_range(inode, locked_page, cow_start,
1108 found_key.offset - 1, page_started,
1111 cow_start = (u64)-1;
1114 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1115 struct extent_map *em;
1116 struct extent_map_tree *em_tree;
1117 em_tree = &BTRFS_I(inode)->extent_tree;
1118 em = alloc_extent_map(GFP_NOFS);
1119 em->start = cur_offset;
1120 em->orig_start = em->start;
1121 em->len = num_bytes;
1122 em->block_len = num_bytes;
1123 em->block_start = disk_bytenr;
1124 em->bdev = root->fs_info->fs_devices->latest_bdev;
1125 set_bit(EXTENT_FLAG_PINNED, &em->flags);
1127 write_lock(&em_tree->lock);
1128 ret = add_extent_mapping(em_tree, em);
1129 write_unlock(&em_tree->lock);
1130 if (ret != -EEXIST) {
1131 free_extent_map(em);
1134 btrfs_drop_extent_cache(inode, em->start,
1135 em->start + em->len - 1, 0);
1137 type = BTRFS_ORDERED_PREALLOC;
1139 type = BTRFS_ORDERED_NOCOW;
1142 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1143 num_bytes, num_bytes, type);
1146 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1147 cur_offset, cur_offset + num_bytes - 1,
1148 locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
1149 EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
1150 EXTENT_SET_PRIVATE2);
1151 cur_offset = extent_end;
1152 if (cur_offset > end)
1155 btrfs_release_path(root, path);
1157 if (cur_offset <= end && cow_start == (u64)-1)
1158 cow_start = cur_offset;
1159 if (cow_start != (u64)-1) {
1160 ret = cow_file_range(inode, locked_page, cow_start, end,
1161 page_started, nr_written, 1);
1165 ret = btrfs_end_transaction(trans, root);
1167 btrfs_free_path(path);
1172 * extent_io.c callback to do delayed allocation processing
1174 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1175 u64 start, u64 end, int *page_started,
1176 unsigned long *nr_written)
1179 struct btrfs_root *root = BTRFS_I(inode)->root;
1181 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
1182 ret = run_delalloc_nocow(inode, locked_page, start, end,
1183 page_started, 1, nr_written);
1184 else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
1185 ret = run_delalloc_nocow(inode, locked_page, start, end,
1186 page_started, 0, nr_written);
1187 else if (!btrfs_test_opt(root, COMPRESS))
1188 ret = cow_file_range(inode, locked_page, start, end,
1189 page_started, nr_written, 1);
1191 ret = cow_file_range_async(inode, locked_page, start, end,
1192 page_started, nr_written);
1196 static int btrfs_split_extent_hook(struct inode *inode,
1197 struct extent_state *orig, u64 split)
1199 struct btrfs_root *root = BTRFS_I(inode)->root;
1202 if (!(orig->state & EXTENT_DELALLOC))
1205 size = orig->end - orig->start + 1;
1206 if (size > root->fs_info->max_extent) {
1210 new_size = orig->end - split + 1;
1211 num_extents = div64_u64(size + root->fs_info->max_extent - 1,
1212 root->fs_info->max_extent);
1215 * if we break a large extent up then leave outstanding_extents
1216 * be, since we've already accounted for the large extent.
1218 if (div64_u64(new_size + root->fs_info->max_extent - 1,
1219 root->fs_info->max_extent) < num_extents)
1223 spin_lock(&BTRFS_I(inode)->accounting_lock);
1224 BTRFS_I(inode)->outstanding_extents++;
1225 spin_unlock(&BTRFS_I(inode)->accounting_lock);
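/*
 * Illustrative sketch, not part of the original file (helper name made up):
 * the split hook above and the merge hook below charge a delalloc range one
 * outstanding extent per max_extent worth of bytes, i.e. a ceiling division;
 * a 300K range with a 128K max_extent counts as 3 extents.
 */
static inline u64 example_outstanding_extent_count(u64 size, u64 max_extent)
{
	return div64_u64(size + max_extent - 1, max_extent);
}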
1231 * extent_io.c merge_extent_hook, used to track merged delayed allocation
1232 * extents so we can keep track of new extents that are just merged onto old
1233 * extents, such as when we are doing sequential writes, so we can properly
1234 * account for the metadata space we'll need.
1236 static int btrfs_merge_extent_hook(struct inode *inode,
1237 struct extent_state *new,
1238 struct extent_state *other)
1240 struct btrfs_root *root = BTRFS_I(inode)->root;
1241 u64 new_size, old_size;
1244 /* not delalloc, ignore it */
1245 if (!(other->state & EXTENT_DELALLOC))
1248 old_size = other->end - other->start + 1;
1249 if (new->start < other->start)
1250 new_size = other->end - new->start + 1;
1252 new_size = new->end - other->start + 1;
1254 /* we're not bigger than the max, unreserve the space and go */
1255 if (new_size <= root->fs_info->max_extent) {
1256 spin_lock(&BTRFS_I(inode)->accounting_lock);
1257 BTRFS_I(inode)->outstanding_extents--;
1258 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1263 * If we grew by another max_extent, just return, we want to keep that
1266 num_extents = div64_u64(old_size + root->fs_info->max_extent - 1,
1267 root->fs_info->max_extent);
1268 if (div64_u64(new_size + root->fs_info->max_extent - 1,
1269 root->fs_info->max_extent) > num_extents)
1272 spin_lock(&BTRFS_I(inode)->accounting_lock);
1273 BTRFS_I(inode)->outstanding_extents--;
1274 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1280 * extent_io.c set_bit_hook, used to track delayed allocation
1281 * bytes in this file, and to maintain the list of inodes that
1282 * have pending delalloc work to be done.
1284 static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1285 unsigned long old, unsigned long bits)
1289 * set_bit and clear bit hooks normally require _irqsave/restore
1290 * but in this case, we are only testing for the DELALLOC
1291 * bit, which is only set or cleared with irqs on
1293 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1294 struct btrfs_root *root = BTRFS_I(inode)->root;
1296 spin_lock(&BTRFS_I(inode)->accounting_lock);
1297 BTRFS_I(inode)->outstanding_extents++;
1298 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1299 btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1300 spin_lock(&root->fs_info->delalloc_lock);
1301 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
1302 root->fs_info->delalloc_bytes += end - start + 1;
1303 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1304 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1305 &root->fs_info->delalloc_inodes);
1307 spin_unlock(&root->fs_info->delalloc_lock);
1313 * extent_io.c clear_bit_hook, see set_bit_hook for why
1315 static int btrfs_clear_bit_hook(struct inode *inode,
1316 struct extent_state *state, unsigned long bits)
1319 * set_bit and clear bit hooks normally require _irqsave/restore
1320 * but in this case, we are only testing for the DELALLOC
1321 * bit, which is only set or cleared with irqs on
1323 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1324 struct btrfs_root *root = BTRFS_I(inode)->root;
1326 if (bits & EXTENT_DO_ACCOUNTING) {
1327 spin_lock(&BTRFS_I(inode)->accounting_lock);
1328 BTRFS_I(inode)->outstanding_extents--;
1329 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1330 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
1333 spin_lock(&root->fs_info->delalloc_lock);
1334 if (state->end - state->start + 1 >
1335 root->fs_info->delalloc_bytes) {
1336 printk(KERN_INFO "btrfs warning: delalloc account "
1338 (unsigned long long)
1339 state->end - state->start + 1,
1340 (unsigned long long)
1341 root->fs_info->delalloc_bytes);
1342 btrfs_delalloc_free_space(root, inode, (u64)-1);
1343 root->fs_info->delalloc_bytes = 0;
1344 BTRFS_I(inode)->delalloc_bytes = 0;
1346 btrfs_delalloc_free_space(root, inode,
1349 root->fs_info->delalloc_bytes -= state->end -
1351 BTRFS_I(inode)->delalloc_bytes -= state->end -
1354 if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1355 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1356 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1358 spin_unlock(&root->fs_info->delalloc_lock);
1364 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1365 * we don't create bios that span stripes or chunks
1367 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1368 size_t size, struct bio *bio,
1369 unsigned long bio_flags)
1371 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1372 struct btrfs_mapping_tree *map_tree;
1373 u64 logical = (u64)bio->bi_sector << 9;
1378 if (bio_flags & EXTENT_BIO_COMPRESSED)
1381 length = bio->bi_size;
1382 map_tree = &root->fs_info->mapping_tree;
1383 map_length = length;
1384 ret = btrfs_map_block(map_tree, READ, logical,
1385 &map_length, NULL, 0);
1387 if (map_length < length + size)
1393 * in order to insert checksums into the metadata in large chunks,
1394 * we wait until bio submission time. All the pages in the bio are
1395 * checksummed and sums are attached onto the ordered extent record.
1397 * At IO completion time the csums attached to the ordered extent record
1398 * are inserted into the btree
1400 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1401 struct bio *bio, int mirror_num,
1402 unsigned long bio_flags)
1404 struct btrfs_root *root = BTRFS_I(inode)->root;
1407 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1413 * in order to insert checksums into the metadata in large chunks,
1414 * we wait until bio submission time. All the pages in the bio are
1415 * checksummed and sums are attached onto the ordered extent record.
1417 * At IO completion time the csums attached to the ordered extent record
1418 * are inserted into the btree
1420 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1421 int mirror_num, unsigned long bio_flags)
1423 struct btrfs_root *root = BTRFS_I(inode)->root;
1424 return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1428 * extent_io.c submission hook. This does the right thing for csum calculation
1429 * on write, or reading the csums from the tree before a read
1431 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1432 int mirror_num, unsigned long bio_flags)
1434 struct btrfs_root *root = BTRFS_I(inode)->root;
1438 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1440 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1443 if (!(rw & (1 << BIO_RW))) {
1444 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1445 return btrfs_submit_compressed_read(inode, bio,
1446 mirror_num, bio_flags);
1447 } else if (!skip_sum)
1448 btrfs_lookup_bio_sums(root, inode, bio, NULL);
1450 } else if (!skip_sum) {
1451 /* csum items have already been cloned */
1452 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1454 /* we're doing a write, do the async checksumming */
1455 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1456 inode, rw, bio, mirror_num,
1457 bio_flags, __btrfs_submit_bio_start,
1458 __btrfs_submit_bio_done);
1462 return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1466 * given a list of ordered sums record them in the inode. This happens
1467 * at IO completion time based on sums calculated at bio submission time.
1469 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1470 struct inode *inode, u64 file_offset,
1471 struct list_head *list)
1473 struct btrfs_ordered_sum *sum;
1475 btrfs_set_trans_block_group(trans, inode);
1477 list_for_each_entry(sum, list, list) {
1478 btrfs_csum_file_blocks(trans,
1479 BTRFS_I(inode)->root->fs_info->csum_root, sum);
1484 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
1486 if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1488 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1492 /* see btrfs_writepage_start_hook for details on why this is required */
1493 struct btrfs_writepage_fixup {
1495 struct btrfs_work work;
1498 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1500 struct btrfs_writepage_fixup *fixup;
1501 struct btrfs_ordered_extent *ordered;
1503 struct inode *inode;
1507 fixup = container_of(work, struct btrfs_writepage_fixup, work);
1511 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1512 ClearPageChecked(page);
1516 inode = page->mapping->host;
1517 page_start = page_offset(page);
1518 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1520 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1522 /* already ordered? We're done */
1523 if (PagePrivate2(page))
1526 ordered = btrfs_lookup_ordered_extent(inode, page_start);
1528 unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
1529 page_end, GFP_NOFS);
1531 btrfs_start_ordered_extent(inode, ordered, 1);
1535 btrfs_set_extent_delalloc(inode, page_start, page_end);
1536 ClearPageChecked(page);
1538 unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1541 page_cache_release(page);
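/*
 * Illustrative sketch, not part of the original file (helper name made up):
 * the fixup worker above locks the whole page as a byte range; page_end is
 * the last byte the page covers, e.g. the page at file offset 4096 ends at
 * byte 8191 with 4K pages.
 */
static inline u64 example_page_end(struct page *page)
{
	return page_offset(page) + PAGE_CACHE_SIZE - 1;
}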
1545 * There are a few paths in the higher layers of the kernel that directly
1546 * set the page dirty bit without asking the filesystem if it is a
1547 * good idea. This causes problems because we want to make sure COW
1548 * properly happens and the data=ordered rules are followed.
1550 * In our case any range that doesn't have the ORDERED bit set
1551 * hasn't been properly set up for IO. We kick off an async process
1552 * to fix it up. The async helper will wait for ordered extents, set
1553 * the delalloc bit and make it safe to write the page.
1555 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1557 struct inode *inode = page->mapping->host;
1558 struct btrfs_writepage_fixup *fixup;
1559 struct btrfs_root *root = BTRFS_I(inode)->root;
1561 /* this page is properly in the ordered list */
1562 if (TestClearPagePrivate2(page))
1565 if (PageChecked(page))
1568 fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1572 SetPageChecked(page);
1573 page_cache_get(page);
1574 fixup->work.func = btrfs_writepage_fixup_worker;
1576 btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1580 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1581 struct inode *inode, u64 file_pos,
1582 u64 disk_bytenr, u64 disk_num_bytes,
1583 u64 num_bytes, u64 ram_bytes,
1585 u8 compression, u8 encryption,
1586 u16 other_encoding, int extent_type)
1588 struct btrfs_root *root = BTRFS_I(inode)->root;
1589 struct btrfs_file_extent_item *fi;
1590 struct btrfs_path *path;
1591 struct extent_buffer *leaf;
1592 struct btrfs_key ins;
1596 path = btrfs_alloc_path();
1599 path->leave_spinning = 1;
1602 * we may be replacing one extent in the tree with another.
1603 * The new extent is pinned in the extent map, and we don't want
1604 * to drop it from the cache until it is completely in the btree.
1606 * So, tell btrfs_drop_extents to leave this extent in the cache.
1607 * the caller is expected to unpin it and allow it to be merged
1610 ret = btrfs_drop_extents(trans, root, inode, file_pos,
1611 file_pos + num_bytes, locked_end,
1612 file_pos, &hint, 0);
1615 ins.objectid = inode->i_ino;
1616 ins.offset = file_pos;
1617 ins.type = BTRFS_EXTENT_DATA_KEY;
1618 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1620 leaf = path->nodes[0];
1621 fi = btrfs_item_ptr(leaf, path->slots[0],
1622 struct btrfs_file_extent_item);
1623 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1624 btrfs_set_file_extent_type(leaf, fi, extent_type);
1625 btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1626 btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1627 btrfs_set_file_extent_offset(leaf, fi, 0);
1628 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1629 btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1630 btrfs_set_file_extent_compression(leaf, fi, compression);
1631 btrfs_set_file_extent_encryption(leaf, fi, encryption);
1632 btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1634 btrfs_unlock_up_safe(path, 1);
1635 btrfs_set_lock_blocking(leaf);
1637 btrfs_mark_buffer_dirty(leaf);
1639 inode_add_bytes(inode, num_bytes);
1641 ins.objectid = disk_bytenr;
1642 ins.offset = disk_num_bytes;
1643 ins.type = BTRFS_EXTENT_ITEM_KEY;
1644 ret = btrfs_alloc_reserved_file_extent(trans, root,
1645 root->root_key.objectid,
1646 inode->i_ino, file_pos, &ins);
1648 btrfs_free_path(path);
1654 * helper function for btrfs_finish_ordered_io, this
1655 * just reads in some of the csum leaves to prime them into ram
1656 * before we start the transaction. It limits the amount of btree
1657 * reads required while inside the transaction.
1659 static noinline void reada_csum(struct btrfs_root *root,
1660 struct btrfs_path *path,
1661 struct btrfs_ordered_extent *ordered_extent)
1663 struct btrfs_ordered_sum *sum;
1666 sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
1668 bytenr = sum->sums[0].bytenr;
1671 * we don't care about the results, the point of this search is
1672 * just to get the btree leaves into ram
1674 btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
1677 /* as ordered data IO finishes, this gets called so we can finish
1678 * an ordered extent if the range of bytes in the file it covers are
1681 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1683 struct btrfs_root *root = BTRFS_I(inode)->root;
1684 struct btrfs_trans_handle *trans;
1685 struct btrfs_ordered_extent *ordered_extent = NULL;
1686 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1687 struct btrfs_path *path;
1691 ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
1696 * before we join the transaction, try to do some of our IO.
1697 * This will limit the amount of IO that we have to do with
1698 * the transaction running. We're unlikely to need to do any
1699 * IO if the file extents are new; the disk_i_size check
1700 * covers the most common case.
1702 if (start < BTRFS_I(inode)->disk_i_size) {
1703 path = btrfs_alloc_path();
1705 ret = btrfs_lookup_file_extent(NULL, root, path,
1708 ordered_extent = btrfs_lookup_ordered_extent(inode,
1710 if (!list_empty(&ordered_extent->list)) {
1711 btrfs_release_path(root, path);
1712 reada_csum(root, path, ordered_extent);
1714 btrfs_free_path(path);
1718 trans = btrfs_join_transaction(root, 1);
1720 if (!ordered_extent)
1721 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1722 BUG_ON(!ordered_extent);
1723 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
1726 lock_extent(io_tree, ordered_extent->file_offset,
1727 ordered_extent->file_offset + ordered_extent->len - 1,
1730 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1732 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1734 ret = btrfs_mark_extent_written(trans, root, inode,
1735 ordered_extent->file_offset,
1736 ordered_extent->file_offset +
1737 ordered_extent->len);
1740 ret = insert_reserved_file_extent(trans, inode,
1741 ordered_extent->file_offset,
1742 ordered_extent->start,
1743 ordered_extent->disk_len,
1744 ordered_extent->len,
1745 ordered_extent->len,
1746 ordered_extent->file_offset +
1747 ordered_extent->len,
1749 BTRFS_FILE_EXTENT_REG);
1750 unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1751 ordered_extent->file_offset,
1752 ordered_extent->len);
1755 unlock_extent(io_tree, ordered_extent->file_offset,
1756 ordered_extent->file_offset + ordered_extent->len - 1,
1759 add_pending_csums(trans, inode, ordered_extent->file_offset,
1760 &ordered_extent->list);
1762 mutex_lock(&BTRFS_I(inode)->extent_mutex);
1763 btrfs_ordered_update_i_size(inode, ordered_extent);
1764 btrfs_update_inode(trans, root, inode);
1765 btrfs_remove_ordered_extent(inode, ordered_extent);
1766 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
1769 btrfs_put_ordered_extent(ordered_extent);
1770 /* once for the tree */
1771 btrfs_put_ordered_extent(ordered_extent);
1773 btrfs_end_transaction(trans, root);
1777 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1778 struct extent_state *state, int uptodate)
1780 ClearPagePrivate2(page);
1781 return btrfs_finish_ordered_io(page->mapping->host, start, end);
1785 * When IO fails, either with EIO or csum verification fails, we
1786 * try other mirrors that might have a good copy of the data. This
1787 * io_failure_record is used to record state as we go through all the
1788 * mirrors. If another mirror has good data, the page is set up to date
1789 * and things continue. If a good mirror can't be found, the original
1790 * bio end_io callback is called to indicate things have failed.
1792 struct io_failure_record {
1797 unsigned long bio_flags;
1801 static int btrfs_io_failed_hook(struct bio *failed_bio,
1802 struct page *page, u64 start, u64 end,
1803 struct extent_state *state)
1805 struct io_failure_record *failrec = NULL;
1807 struct extent_map *em;
1808 struct inode *inode = page->mapping->host;
1809 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1810 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1817 ret = get_state_private(failure_tree, start, &private);
1819 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1822 failrec->start = start;
1823 failrec->len = end - start + 1;
1824 failrec->last_mirror = 0;
1825 failrec->bio_flags = 0;
1827 read_lock(&em_tree->lock);
1828 em = lookup_extent_mapping(em_tree, start, failrec->len);
1829 if (em->start > start || em->start + em->len < start) {
1830 free_extent_map(em);
1833 read_unlock(&em_tree->lock);
1835 if (!em || IS_ERR(em)) {
1839 logical = start - em->start;
1840 logical = em->block_start + logical;
1841 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1842 logical = em->block_start;
1843 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1845 failrec->logical = logical;
1846 free_extent_map(em);
1847 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1848 EXTENT_DIRTY, GFP_NOFS);
1849 set_state_private(failure_tree, start,
1850 (u64)(unsigned long)failrec);
1852 failrec = (struct io_failure_record *)(unsigned long)private;
1854 num_copies = btrfs_num_copies(
1855 &BTRFS_I(inode)->root->fs_info->mapping_tree,
1856 failrec->logical, failrec->len);
1857 failrec->last_mirror++;
1859 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1860 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1863 if (state && state->start != failrec->start)
1865 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1867 if (!state || failrec->last_mirror > num_copies) {
1868 set_state_private(failure_tree, failrec->start, 0);
1869 clear_extent_bits(failure_tree, failrec->start,
1870 failrec->start + failrec->len - 1,
1871 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1875 bio = bio_alloc(GFP_NOFS, 1);
1876 bio->bi_private = state;
1877 bio->bi_end_io = failed_bio->bi_end_io;
1878 bio->bi_sector = failrec->logical >> 9;
1879 bio->bi_bdev = failed_bio->bi_bdev;
1882 bio_add_page(bio, page, failrec->len, start - page_offset(page));
1883 if (failed_bio->bi_rw & (1 << BIO_RW))
1888 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1889 failrec->last_mirror,
1890 failrec->bio_flags);
1895 * each time an IO finishes, we do a fast check in the IO failure tree
1896 * to see if we need to process or clean up an io_failure_record
1898 static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1901 u64 private_failure;
1902 struct io_failure_record *failure;
1906 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1907 (u64)-1, 1, EXTENT_DIRTY)) {
1908 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1909 start, &private_failure);
1911 failure = (struct io_failure_record *)(unsigned long)
1913 set_state_private(&BTRFS_I(inode)->io_failure_tree,
1915 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1917 failure->start + failure->len - 1,
1918 EXTENT_DIRTY | EXTENT_LOCKED,
1927 * when reads are done, we need to check csums to verify the data is correct.
1928 * If there's a match, we allow the bio to finish. If not, we go through
1929 * the io_failure_record routines to find good copies
1931 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1932 struct extent_state *state)
1934 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1935 struct inode *inode = page->mapping->host;
1936 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1938 u64 private = ~(u32)0;
1940 struct btrfs_root *root = BTRFS_I(inode)->root;
1943 if (PageChecked(page)) {
1944 ClearPageChecked(page);
1948 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1951 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1952 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
1953 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1958 if (state && state->start == start) {
1959 private = state->private;
1962 ret = get_state_private(io_tree, start, &private);
1964 kaddr = kmap_atomic(page, KM_USER0);
1968 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
1969 btrfs_csum_final(csum, (char *)&csum);
1970 if (csum != private)
1973 kunmap_atomic(kaddr, KM_USER0);
1975 /* if the io failure tree for this inode is non-empty,
1976 * check to see if we've recovered from a failed IO
1978 btrfs_clean_io_failures(inode, start);
1982 if (printk_ratelimit()) {
1983 printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1984 "private %llu\n", page->mapping->host->i_ino,
1985 (unsigned long long)start, csum,
1986 (unsigned long long)private);
1988 memset(kaddr + offset, 1, end - start + 1);
1989 flush_dcache_page(page);
1990 kunmap_atomic(kaddr, KM_USER0);
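/*
 * Illustrative sketch, not part of the original file (helper name made up):
 * 'offset' above is where the checked range begins inside its page: the
 * start byte minus the byte offset of the page itself, e.g. start 5000 in
 * the second 4K page gives offset 904.
 */
static inline size_t example_offset_in_page(struct page *page, u64 start)
{
	return start - ((u64)page->index << PAGE_CACHE_SHIFT);
}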
1997 * This creates an orphan entry for the given inode in case something goes
1998 * wrong in the middle of an unlink/truncate.
2000 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2002 struct btrfs_root *root = BTRFS_I(inode)->root;
2005 spin_lock(&root->list_lock);
2007 /* already on the orphan list, we're good */
2008 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
2009 spin_unlock(&root->list_lock);
2013 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2015 spin_unlock(&root->list_lock);
2018 * insert an orphan item to track this unlinked/truncated file
2020 ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
2026 * We have done the truncate/delete so we can go ahead and remove the orphan
2027 * item for this particular inode.
2029 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2031 struct btrfs_root *root = BTRFS_I(inode)->root;
2034 spin_lock(&root->list_lock);
2036 if (list_empty(&BTRFS_I(inode)->i_orphan)) {
2037 spin_unlock(&root->list_lock);
2041 list_del_init(&BTRFS_I(inode)->i_orphan);
2043 spin_unlock(&root->list_lock);
2047 spin_unlock(&root->list_lock);
2049 ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
2055 * this cleans up any orphans that may be left on the list from the last use
2058 void btrfs_orphan_cleanup(struct btrfs_root *root)
2060 struct btrfs_path *path;
2061 struct extent_buffer *leaf;
2062 struct btrfs_item *item;
2063 struct btrfs_key key, found_key;
2064 struct btrfs_trans_handle *trans;
2065 struct inode *inode;
2066 int ret = 0, nr_unlink = 0, nr_truncate = 0;
2068 path = btrfs_alloc_path();
2073 key.objectid = BTRFS_ORPHAN_OBJECTID;
2074 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2075 key.offset = (u64)-1;
2079 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2081 printk(KERN_ERR "Error searching slot for orphan: %d"
2087 * if ret == 0 means we found what we were searching for, which
2088 * is weird, but possible, so only screw with path if we didn't
2089 * find the key and see if we have stuff that matches
2092 if (path->slots[0] == 0)
2097 /* pull out the item */
2098 leaf = path->nodes[0];
2099 item = btrfs_item_nr(leaf, path->slots[0]);
2100 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2102 /* make sure the item matches what we want */
2103 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2105 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2108 /* release the path since we're done with it */
2109 btrfs_release_path(root, path);
2112 * this is where we are basically btrfs_lookup, without the
2113 * crossing root thing. we store the inode number in the
2114 * offset of the orphan item.
2116 found_key.objectid = found_key.offset;
2117 found_key.type = BTRFS_INODE_ITEM_KEY;
2118 found_key.offset = 0;
2119 inode = btrfs_iget(root->fs_info->sb, &found_key, root);
2124 * add this inode to the orphan list so btrfs_orphan_del does
2125 * the proper thing when we hit it
2127 spin_lock(&root->list_lock);
2128 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2129 spin_unlock(&root->list_lock);
2132 * if this is a bad inode, it means we actually succeeded in
2133 * removing the inode, but not the orphan record, which means
2134 * we need to manually delete the orphan since iput will just
2135 * do a destroy_inode
2137 if (is_bad_inode(inode)) {
2138 trans = btrfs_start_transaction(root, 1);
2139 btrfs_orphan_del(trans, inode);
2140 btrfs_end_transaction(trans, root);
2146 /* if we have links, this was a truncate, let's do that */
2146 if (inode->i_nlink) {
2148 btrfs_truncate(inode);
2153 /* this will do delete_inode and everything for us */
2158 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2160 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2162 btrfs_free_path(path);
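/*
 * Illustrative sketch, not part of the original file (helper name made up):
 * orphan items are keyed as (BTRFS_ORPHAN_OBJECTID, BTRFS_ORPHAN_ITEM_KEY,
 * inode number), which is why the cleanup loop above copies found_key.offset
 * back into found_key.objectid before looking the inode up.
 */
static inline void example_orphan_key(struct btrfs_key *key, u64 ino)
{
	key->objectid = BTRFS_ORPHAN_OBJECTID;
	key->type = BTRFS_ORPHAN_ITEM_KEY;
	key->offset = ino;
}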
2166 * very simple check to peek ahead in the leaf looking for xattrs. If we
2167 * don't find any xattrs, we know there can't be any acls.
2169 * slot is the slot the inode is in, objectid is the objectid of the inode
2171 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2172 int slot, u64 objectid)
2174 u32 nritems = btrfs_header_nritems(leaf);
2175 struct btrfs_key found_key;
2179 while (slot < nritems) {
2180 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2182 /* we found a different objectid, there must not be acls */
2183 if (found_key.objectid != objectid)
2186 /* we found an xattr, assume we've got an acl */
2187 if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2191 * we found a key greater than an xattr key, there can't
2192 * be any acls later on
2194 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2201 * it goes inode, inode backrefs, xattrs, extents,
2202 * so if there are a ton of hard links to an inode there can
2203 * be a lot of backrefs. Don't waste time searching too hard,
2204 * this is just an optimization
2209 /* we hit the end of the leaf before we found an xattr or
2210 * something larger than an xattr. We have to assume the inode has acls.
2217 * read an inode from the btree into the in-memory inode
2219 static void btrfs_read_locked_inode(struct inode *inode)
2221 struct btrfs_path *path;
2222 struct extent_buffer *leaf;
2223 struct btrfs_inode_item *inode_item;
2224 struct btrfs_timespec *tspec;
2225 struct btrfs_root *root = BTRFS_I(inode)->root;
2226 struct btrfs_key location;
2228 u64 alloc_group_block;
2232 path = btrfs_alloc_path();
2234 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2236 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2240 leaf = path->nodes[0];
2241 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2242 struct btrfs_inode_item);
2244 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2245 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2246 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2247 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2248 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2250 tspec = btrfs_inode_atime(inode_item);
2251 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2252 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2254 tspec = btrfs_inode_mtime(inode_item);
2255 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2256 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2258 tspec = btrfs_inode_ctime(inode_item);
2259 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2260 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2262 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2263 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2264 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2265 inode->i_generation = BTRFS_I(inode)->generation;
2267 rdev = btrfs_inode_rdev(leaf, inode_item);
2269 BTRFS_I(inode)->index_cnt = (u64)-1;
2270 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2272 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2275 * try to precache a NULL acl entry for files that don't have
2276 * any xattrs or acls
2278 maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2280 cache_no_acl(inode);
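/*
 * remember the block group this inode was allocated in; it is kept in
 * BTRFS_I(inode)->block_group and used as an allocation hint so new
 * blocks for this inode tend to land near its existing items.
 */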
2282 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2283 alloc_group_block, 0);
2284 btrfs_free_path(path);
2287 switch (inode->i_mode & S_IFMT) {
2289 inode->i_mapping->a_ops = &btrfs_aops;
2290 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2291 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2292 inode->i_fop = &btrfs_file_operations;
2293 inode->i_op = &btrfs_file_inode_operations;
2296 inode->i_fop = &btrfs_dir_file_operations;
2297 if (root == root->fs_info->tree_root)
2298 inode->i_op = &btrfs_dir_ro_inode_operations;
2300 inode->i_op = &btrfs_dir_inode_operations;
2303 inode->i_op = &btrfs_symlink_inode_operations;
2304 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2305 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2308 inode->i_op = &btrfs_special_inode_operations;
2309 init_special_inode(inode, inode->i_mode, rdev);
2313 btrfs_update_iflags(inode);
2317 btrfs_free_path(path);
2318 make_bad_inode(inode);
2322 * given a leaf and an inode, copy the inode fields into the leaf
2324 static void fill_inode_item(struct btrfs_trans_handle *trans,
2325 struct extent_buffer *leaf,
2326 struct btrfs_inode_item *item,
2327 struct inode *inode)
2329 btrfs_set_inode_uid(leaf, item, inode->i_uid);
2330 btrfs_set_inode_gid(leaf, item, inode->i_gid);
2331 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2332 btrfs_set_inode_mode(leaf, item, inode->i_mode);
2333 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2335 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2336 inode->i_atime.tv_sec);
2337 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2338 inode->i_atime.tv_nsec);
2340 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2341 inode->i_mtime.tv_sec);
2342 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2343 inode->i_mtime.tv_nsec);
2345 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2346 inode->i_ctime.tv_sec);
2347 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2348 inode->i_ctime.tv_nsec);
2350 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2351 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2352 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2353 btrfs_set_inode_transid(leaf, item, trans->transid);
2354 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2355 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2356 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2360 * copy everything in the in-memory inode into the btree.
2362 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2363 struct btrfs_root *root, struct inode *inode)
2365 struct btrfs_inode_item *inode_item;
2366 struct btrfs_path *path;
2367 struct extent_buffer *leaf;
2370 path = btrfs_alloc_path();
2372 path->leave_spinning = 1;
2373 ret = btrfs_lookup_inode(trans, root, path,
2374 &BTRFS_I(inode)->location, 1);
2381 btrfs_unlock_up_safe(path, 1);
2382 leaf = path->nodes[0];
2383 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2384 struct btrfs_inode_item);
2386 fill_inode_item(trans, leaf, inode_item, inode);
2387 btrfs_mark_buffer_dirty(leaf);
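/*
 * note the transaction this inode was last updated in; the fsync/tree-log
 * code compares it against the last committed transaction to decide
 * whether the inode still needs to be logged.
 */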
2388 btrfs_set_inode_last_trans(trans, inode);
2391 btrfs_free_path(path);
2397 * unlink helper that gets used here in inode.c and in the tree logging
2398 * recovery code. It removes a link in a directory with a given name, and
2399 * also drops the back refs in the inode to the directory
2401 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2402 struct btrfs_root *root,
2403 struct inode *dir, struct inode *inode,
2404 const char *name, int name_len)
2406 struct btrfs_path *path;
2408 struct extent_buffer *leaf;
2409 struct btrfs_dir_item *di;
2410 struct btrfs_key key;
2413 path = btrfs_alloc_path();
2419 path->leave_spinning = 1;
2420 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2421 name, name_len, -1);
2430 leaf = path->nodes[0];
2431 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2432 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2435 btrfs_release_path(root, path);
2437 ret = btrfs_del_inode_ref(trans, root, name, name_len,
2439 dir->i_ino, &index);
2441 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2442 "inode %lu parent %lu\n", name_len, name,
2443 inode->i_ino, dir->i_ino);
2447 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2448 index, name, name_len, -1);
2457 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2458 btrfs_release_path(root, path);
2460 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2462 BUG_ON(ret != 0 && ret != -ENOENT);
2464 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2468 btrfs_free_path(path);
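/*
 * every name in a directory costs two items: a DIR_ITEM keyed by the name
 * hash and a DIR_INDEX keyed by the sequence number, so directory i_size
 * accounts for name_len twice.
 */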
2472 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2473 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2474 btrfs_update_inode(trans, root, dir);
2475 btrfs_drop_nlink(inode);
2476 ret = btrfs_update_inode(trans, root, inode);
2481 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2483 struct btrfs_root *root;
2484 struct btrfs_trans_handle *trans;
2485 struct inode *inode = dentry->d_inode;
2487 unsigned long nr = 0;
2489 root = BTRFS_I(dir)->root;
2492 * 5 items for unlink inode, plus 1 for the orphan item
2495 ret = btrfs_reserve_metadata_space(root, 6);
2499 trans = btrfs_start_transaction(root, 1);
2500 if (IS_ERR(trans)) {
2501 btrfs_unreserve_metadata_space(root, 6);
2502 return PTR_ERR(trans);
2505 btrfs_set_trans_block_group(trans, dir);
2507 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2509 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2510 dentry->d_name.name, dentry->d_name.len);
2512 if (inode->i_nlink == 0)
2513 ret = btrfs_orphan_add(trans, inode);
2515 nr = trans->blocks_used;
2517 btrfs_end_transaction_throttle(trans, root);
2518 btrfs_unreserve_metadata_space(root, 6);
2519 btrfs_btree_balance_dirty(root, nr);
2523 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2524 struct btrfs_root *root,
2525 struct inode *dir, u64 objectid,
2526 const char *name, int name_len)
2528 struct btrfs_path *path;
2529 struct extent_buffer *leaf;
2530 struct btrfs_dir_item *di;
2531 struct btrfs_key key;
2535 path = btrfs_alloc_path();
2539 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2540 name, name_len, -1);
2541 BUG_ON(!di || IS_ERR(di));
2543 leaf = path->nodes[0];
2544 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2545 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2546 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2548 btrfs_release_path(root, path);
2550 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
2551 objectid, root->root_key.objectid,
2552 dir->i_ino, &index, name, name_len);
2554 BUG_ON(ret != -ENOENT);
2555 di = btrfs_search_dir_index_item(root, path, dir->i_ino,
2557 BUG_ON(!di || IS_ERR(di));
2559 leaf = path->nodes[0];
2560 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2561 btrfs_release_path(root, path);
2565 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2566 index, name, name_len, -1);
2567 BUG_ON(!di || IS_ERR(di));
2569 leaf = path->nodes[0];
2570 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2571 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2572 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2574 btrfs_release_path(root, path);
2576 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2577 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2578 ret = btrfs_update_inode(trans, root, dir);
2580 dir->i_sb->s_dirt = 1;
2582 btrfs_free_path(path);
2586 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2588 struct inode *inode = dentry->d_inode;
2591 struct btrfs_root *root = BTRFS_I(dir)->root;
2592 struct btrfs_trans_handle *trans;
2593 unsigned long nr = 0;
2595 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2596 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
2599 ret = btrfs_reserve_metadata_space(root, 5);
2603 trans = btrfs_start_transaction(root, 1);
2604 if (IS_ERR(trans)) {
2605 btrfs_unreserve_metadata_space(root, 5);
2606 return PTR_ERR(trans);
2609 btrfs_set_trans_block_group(trans, dir);
2611 if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
2612 err = btrfs_unlink_subvol(trans, root, dir,
2613 BTRFS_I(inode)->location.objectid,
2614 dentry->d_name.name,
2615 dentry->d_name.len);
2619 err = btrfs_orphan_add(trans, inode);
2623 /* now the directory is empty */
2624 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2625 dentry->d_name.name, dentry->d_name.len);
2627 btrfs_i_size_write(inode, 0);
2629 nr = trans->blocks_used;
2630 ret = btrfs_end_transaction_throttle(trans, root);
2631 btrfs_unreserve_metadata_space(root, 5);
2632 btrfs_btree_balance_dirty(root, nr);
2641 * when truncating bytes in a file, it is possible to avoid reading
2642 * the leaves that contain only checksum items. This can be the
2643 * majority of the IO required to delete a large file, but it must
2644 * be done carefully.
2646 * The keys in the level just above the leaves are checked to make sure
2647 * the lowest key in a given leaf is a csum key, and starts at an offset
2648 * after the new size.
2650 * Then the key for the next leaf is checked to make sure it also has
2651 * a checksum item for the same file. If it does, we know our target leaf
2652 * contains only checksum items, and it can be safely freed without reading it.
2655 * This is just an optimization targeted at large files. It may do
2656 * nothing. It will return 0 unless things went badly.
2658 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2659 struct btrfs_root *root,
2660 struct btrfs_path *path,
2661 struct inode *inode, u64 new_size)
2663 struct btrfs_key key;
2666 struct btrfs_key found_key;
2667 struct btrfs_key other_key;
2668 struct btrfs_leaf_ref *ref;
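/*
 * lowest_level == 1 makes the search stop at the node just above the
 * leaves, so a candidate leaf can be checked and freed by block pointer
 * without ever reading the leaf itself.
 */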
2672 path->lowest_level = 1;
2673 key.objectid = inode->i_ino;
2674 key.type = BTRFS_CSUM_ITEM_KEY;
2675 key.offset = new_size;
2677 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2681 if (path->nodes[1] == NULL) {
2686 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2687 nritems = btrfs_header_nritems(path->nodes[1]);
2692 if (path->slots[1] >= nritems)
2695 /* did we find a key greater than anything we want to delete? */
2696 if (found_key.objectid > inode->i_ino ||
2697 (found_key.objectid == inode->i_ino && found_key.type > key.type))
2700 /* we check the next key in the node to make sure the leaf contains
2701 * only checksum items. This comparison doesn't work if our
2702 * leaf is the last one in the node
2704 if (path->slots[1] + 1 >= nritems) {
2706 /* search forward from the last key in the node, this
2707 * will bring us into the next node in the tree
2709 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2711 /* unlikely, but we inc below, so check to be safe */
2712 if (found_key.offset == (u64)-1)
2715 /* search_forward needs a path with locks held, do the
2716 * search again for the original key. It is possible
2717 * this will race with a balance and return a path that
2718 * we could modify, but this drop is just an optimization
2719 * and is allowed to miss some leaves.
2721 btrfs_release_path(root, path);
2724 /* setup a max key for search_forward */
2725 other_key.offset = (u64)-1;
2726 other_key.type = key.type;
2727 other_key.objectid = key.objectid;
2729 path->keep_locks = 1;
2730 ret = btrfs_search_forward(root, &found_key, &other_key,
2732 path->keep_locks = 0;
2733 if (ret || found_key.objectid != key.objectid ||
2734 found_key.type != key.type) {
2739 key.offset = found_key.offset;
2740 btrfs_release_path(root, path);
2745 /* we know there's one more slot after us in the tree,
2746 * read that key so we can verify it is also a checksum item
2748 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2750 if (found_key.objectid < inode->i_ino)
2753 if (found_key.type != key.type || found_key.offset < new_size)
2757 * if the key for the next leaf isn't a csum key from this objectid,
2758 * we can't be sure there aren't good items inside this leaf.
2761 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2764 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2765 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2767 * it is safe to delete this leaf, it contains only
2768 * csum items from this inode at an offset >= new_size
2770 ret = btrfs_del_leaf(trans, root, path, leaf_start);
2773 if (root->ref_cows && leaf_gen < trans->transid) {
2774 ref = btrfs_alloc_leaf_ref(root, 0);
2776 ref->root_gen = root->root_key.offset;
2777 ref->bytenr = leaf_start;
2779 ref->generation = leaf_gen;
2782 btrfs_sort_leaf_ref(ref);
2784 ret = btrfs_add_leaf_ref(root, ref, 0);
2786 btrfs_free_leaf_ref(root, ref);
2792 btrfs_release_path(root, path);
2794 if (other_key.objectid == inode->i_ino &&
2795 other_key.type == key.type && other_key.offset > key.offset) {
2796 key.offset = other_key.offset;
2802 /* fixup any changes we've made to the path */
2803 path->lowest_level = 0;
2804 path->keep_locks = 0;
2805 btrfs_release_path(root, path);
2812 * this can truncate away extent items, csum items and directory items.
2813 * It starts at a high offset and removes keys until it can't find
2814 * any higher than new_size
2816 * csum items that cross the new i_size are truncated to the new size
2819 * min_type is the minimum key type to truncate down to. If set to 0, this
2820 * will kill all the items on this inode, including the INODE_ITEM_KEY.
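/*
 * for example, btrfs_delete_inode() passes min_type == 0 to drop every item
 * belonging to the inode, while the regular truncate path passes
 * BTRFS_EXTENT_DATA_KEY so the inode item, refs and xattrs are kept.
 */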
2822 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2823 struct btrfs_root *root,
2824 struct inode *inode,
2825 u64 new_size, u32 min_type)
2828 struct btrfs_path *path;
2829 struct btrfs_key key;
2830 struct btrfs_key found_key;
2831 u32 found_type = (u8)-1;
2832 struct extent_buffer *leaf;
2833 struct btrfs_file_extent_item *fi;
2834 u64 extent_start = 0;
2835 u64 extent_num_bytes = 0;
2836 u64 extent_offset = 0;
2840 int pending_del_nr = 0;
2841 int pending_del_slot = 0;
2842 int extent_type = -1;
2844 u64 mask = root->sectorsize - 1;
2847 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2848 path = btrfs_alloc_path();
2852 /* FIXME, add redo link to tree so we don't leak on crash */
2853 key.objectid = inode->i_ino;
2854 key.offset = (u64)-1;
2858 path->leave_spinning = 1;
2859 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2864 /* there are no items in the tree for us to truncate, we're done */
2867 if (path->slots[0] == 0) {
2876 leaf = path->nodes[0];
2877 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2878 found_type = btrfs_key_type(&found_key);
2881 if (found_key.objectid != inode->i_ino)
2884 if (found_type < min_type)
2887 item_end = found_key.offset;
2888 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2889 fi = btrfs_item_ptr(leaf, path->slots[0],
2890 struct btrfs_file_extent_item);
2891 extent_type = btrfs_file_extent_type(leaf, fi);
2892 encoding = btrfs_file_extent_compression(leaf, fi);
2893 encoding |= btrfs_file_extent_encryption(leaf, fi);
2894 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2896 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2898 btrfs_file_extent_num_bytes(leaf, fi);
2899 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2900 item_end += btrfs_file_extent_inline_len(leaf,
2905 if (item_end < new_size) {
2906 if (found_type == BTRFS_DIR_ITEM_KEY)
2907 found_type = BTRFS_INODE_ITEM_KEY;
2908 else if (found_type == BTRFS_EXTENT_ITEM_KEY)
2909 found_type = BTRFS_EXTENT_DATA_KEY;
2910 else if (found_type == BTRFS_EXTENT_DATA_KEY)
2911 found_type = BTRFS_XATTR_ITEM_KEY;
2912 else if (found_type == BTRFS_XATTR_ITEM_KEY)
2913 found_type = BTRFS_INODE_REF_KEY;
2914 else if (found_type)
2918 btrfs_set_key_type(&key, found_type);
2921 if (found_key.offset >= new_size)
2927 /* FIXME, shrink the extent if the ref count is only 1 */
2928 if (found_type != BTRFS_EXTENT_DATA_KEY)
2931 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2933 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2934 if (!del_item && !encoding) {
2935 u64 orig_num_bytes =
2936 btrfs_file_extent_num_bytes(leaf, fi);
2937 extent_num_bytes = new_size -
2938 found_key.offset + root->sectorsize - 1;
2939 extent_num_bytes = extent_num_bytes &
2940 ~((u64)root->sectorsize - 1);
2941 btrfs_set_file_extent_num_bytes(leaf, fi,
2943 num_dec = (orig_num_bytes -
2945 if (root->ref_cows && extent_start != 0)
2946 inode_sub_bytes(inode, num_dec);
2947 btrfs_mark_buffer_dirty(leaf);
2950 btrfs_file_extent_disk_num_bytes(leaf,
2952 extent_offset = found_key.offset -
2953 btrfs_file_extent_offset(leaf, fi);
2955 /* FIXME blocksize != 4096 */
2956 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2957 if (extent_start != 0) {
2960 inode_sub_bytes(inode, num_dec);
2963 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2965 * we can't truncate inline items that have had special encodings (compression, encryption or other encoding) applied
2969 btrfs_file_extent_compression(leaf, fi) == 0 &&
2970 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2971 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2972 u32 size = new_size - found_key.offset;
2974 if (root->ref_cows) {
2975 inode_sub_bytes(inode, item_end + 1 -
2979 btrfs_file_extent_calc_inline_size(size);
2980 ret = btrfs_truncate_item(trans, root, path,
2983 } else if (root->ref_cows) {
2984 inode_sub_bytes(inode, item_end + 1 -
2990 if (!pending_del_nr) {
2991 /* no pending yet, add ourselves */
2992 pending_del_slot = path->slots[0];
2994 } else if (pending_del_nr &&
2995 path->slots[0] + 1 == pending_del_slot) {
2996 /* hop on the pending chunk */
2998 pending_del_slot = path->slots[0];
3005 if (found_extent && root->ref_cows) {
3006 btrfs_set_path_blocking(path);
3007 ret = btrfs_free_extent(trans, root, extent_start,
3008 extent_num_bytes, 0,
3009 btrfs_header_owner(leaf),
3010 inode->i_ino, extent_offset);
3014 if (path->slots[0] == 0) {
3017 btrfs_release_path(root, path);
3018 if (found_type == BTRFS_INODE_ITEM_KEY)
3024 if (pending_del_nr &&
3025 path->slots[0] + 1 != pending_del_slot) {
3026 struct btrfs_key debug;
3028 btrfs_item_key_to_cpu(path->nodes[0], &debug,
3030 ret = btrfs_del_items(trans, root, path,
3035 btrfs_release_path(root, path);
3036 if (found_type == BTRFS_INODE_ITEM_KEY)
3043 if (pending_del_nr) {
3044 ret = btrfs_del_items(trans, root, path, pending_del_slot,
3047 btrfs_free_path(path);
3052 * taken from block_truncate_page, but does cow as it zeros out
3053 * any bytes left in the last page in the file.
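/*
 * the page is read in if needed, the bytes past 'from' are zeroed in the
 * page cache and the range is marked delalloc, so the partial block is
 * rewritten through the normal COW path instead of being modified in place.
 */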
3055 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3057 struct inode *inode = mapping->host;
3058 struct btrfs_root *root = BTRFS_I(inode)->root;
3059 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3060 struct btrfs_ordered_extent *ordered;
3062 u32 blocksize = root->sectorsize;
3063 pgoff_t index = from >> PAGE_CACHE_SHIFT;
3064 unsigned offset = from & (PAGE_CACHE_SIZE-1);
3070 if ((offset & (blocksize - 1)) == 0)
3072 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
3076 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
3082 page = grab_cache_page(mapping, index);
3084 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3085 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3089 page_start = page_offset(page);
3090 page_end = page_start + PAGE_CACHE_SIZE - 1;
3092 if (!PageUptodate(page)) {
3093 ret = btrfs_readpage(NULL, page);
3095 if (page->mapping != mapping) {
3097 page_cache_release(page);
3100 if (!PageUptodate(page)) {
3105 wait_on_page_writeback(page);
3107 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3108 set_page_extent_mapped(page);
3110 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3112 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3114 page_cache_release(page);
3115 btrfs_start_ordered_extent(inode, ordered, 1);
3116 btrfs_put_ordered_extent(ordered);
3120 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
3121 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
3124 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
3126 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3131 if (offset != PAGE_CACHE_SIZE) {
3133 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
3134 flush_dcache_page(page);
3137 ClearPageChecked(page);
3138 set_page_dirty(page);
3139 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3143 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3144 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3146 page_cache_release(page);
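/*
 * expanding truncate helper: zero the tail of the old last block and insert
 * explicit hole extents (disk_bytenr == 0) covering the range between the
 * old i_size and the new size.
 */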
3151 int btrfs_cont_expand(struct inode *inode, loff_t size)
3153 struct btrfs_trans_handle *trans;
3154 struct btrfs_root *root = BTRFS_I(inode)->root;
3155 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3156 struct extent_map *em;
3157 u64 mask = root->sectorsize - 1;
3158 u64 hole_start = (inode->i_size + mask) & ~mask;
3159 u64 block_end = (size + mask) & ~mask;
3165 if (size <= hole_start)
3168 err = btrfs_truncate_page(inode->i_mapping, inode->i_size);
3173 struct btrfs_ordered_extent *ordered;
3174 btrfs_wait_ordered_range(inode, hole_start,
3175 block_end - hole_start);
3176 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3177 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3180 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3181 btrfs_put_ordered_extent(ordered);
3184 trans = btrfs_start_transaction(root, 1);
3185 btrfs_set_trans_block_group(trans, inode);
3187 cur_offset = hole_start;
3189 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3190 block_end - cur_offset, 0);
3191 BUG_ON(IS_ERR(em) || !em);
3192 last_byte = min(extent_map_end(em), block_end);
3193 last_byte = (last_byte + mask) & ~mask;
3194 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
3196 hole_size = last_byte - cur_offset;
3197 err = btrfs_drop_extents(trans, root, inode,
3199 cur_offset + hole_size,
3201 cur_offset, &hint_byte, 1);
3205 err = btrfs_reserve_metadata_space(root, 1);
3209 err = btrfs_insert_file_extent(trans, root,
3210 inode->i_ino, cur_offset, 0,
3211 0, hole_size, 0, hole_size,
3213 btrfs_drop_extent_cache(inode, hole_start,
3215 btrfs_unreserve_metadata_space(root, 1);
3217 free_extent_map(em);
3218 cur_offset = last_byte;
3219 if (err || cur_offset >= block_end)
3223 btrfs_end_transaction(trans, root);
3224 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3228 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3230 struct inode *inode = dentry->d_inode;
3233 err = inode_change_ok(inode, attr);
3237 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3238 if (attr->ia_size > inode->i_size) {
3239 err = btrfs_cont_expand(inode, attr->ia_size);
3242 } else if (inode->i_size > 0 &&
3243 attr->ia_size == 0) {
3245 /* we're truncating a file that used to have good
3246 * data down to zero. Make sure it gets into
3247 * the ordered flush list so that any new writes
3248 * get down to disk quickly.
3250 BTRFS_I(inode)->ordered_data_close = 1;
3254 err = inode_setattr(inode, attr);
3256 if (!err && ((attr->ia_valid & ATTR_MODE)))
3257 err = btrfs_acl_chmod(inode);
3261 void btrfs_delete_inode(struct inode *inode)
3263 struct btrfs_trans_handle *trans;
3264 struct btrfs_root *root = BTRFS_I(inode)->root;
3268 truncate_inode_pages(&inode->i_data, 0);
3269 if (is_bad_inode(inode)) {
3270 btrfs_orphan_del(NULL, inode);
3273 btrfs_wait_ordered_range(inode, 0, (u64)-1);
3275 if (inode->i_nlink > 0) {
3276 BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3280 btrfs_i_size_write(inode, 0);
3281 trans = btrfs_join_transaction(root, 1);
3283 btrfs_set_trans_block_group(trans, inode);
3284 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
3286 btrfs_orphan_del(NULL, inode);
3287 goto no_delete_lock;
3290 btrfs_orphan_del(trans, inode);
3292 nr = trans->blocks_used;
3295 btrfs_end_transaction(trans, root);
3296 btrfs_btree_balance_dirty(root, nr);
3300 nr = trans->blocks_used;
3301 btrfs_end_transaction(trans, root);
3302 btrfs_btree_balance_dirty(root, nr);
3308 * this returns the key found in the dir entry in the location pointer.
3309 * If no dir entries were found, location->objectid is 0.
3311 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3312 struct btrfs_key *location)
3314 const char *name = dentry->d_name.name;
3315 int namelen = dentry->d_name.len;
3316 struct btrfs_dir_item *di;
3317 struct btrfs_path *path;
3318 struct btrfs_root *root = BTRFS_I(dir)->root;
3321 path = btrfs_alloc_path();
3324 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
3329 if (!di || IS_ERR(di))
3332 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3334 btrfs_free_path(path);
3337 location->objectid = 0;
3342 * when we hit a tree root in a directory, the btrfs part of the inode
3343 * needs to be changed to reflect the root directory of the tree root. This
3344 * is kind of like crossing a mount point.
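/*
 * the link from a directory to a subvolume is recorded as a root ref item
 * in the tree of tree roots; it is looked up and the stored dirid/name are
 * verified before *sub_root is switched to the child root.
 */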
3346 static int fixup_tree_root_location(struct btrfs_root *root,
3348 struct dentry *dentry,
3349 struct btrfs_key *location,
3350 struct btrfs_root **sub_root)
3352 struct btrfs_path *path;
3353 struct btrfs_root *new_root;
3354 struct btrfs_root_ref *ref;
3355 struct extent_buffer *leaf;
3359 path = btrfs_alloc_path();
3366 ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
3367 BTRFS_I(dir)->root->root_key.objectid,
3368 location->objectid);
3375 leaf = path->nodes[0];
3376 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3377 if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
3378 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3381 ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
3382 (unsigned long)(ref + 1),
3383 dentry->d_name.len);
3387 btrfs_release_path(root->fs_info->tree_root, path);
3389 new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3390 if (IS_ERR(new_root)) {
3391 err = PTR_ERR(new_root);
3395 if (btrfs_root_refs(&new_root->root_item) == 0) {
3400 *sub_root = new_root;
3401 location->objectid = btrfs_root_dirid(&new_root->root_item);
3402 location->type = BTRFS_INODE_ITEM_KEY;
3403 location->offset = 0;
3406 btrfs_free_path(path);
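/*
 * each root keeps its in-memory inodes in an rb-tree indexed by inode
 * number; btrfs_invalidate_inodes() walks this tree to evict them when a
 * subvolume is deleted.
 */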
3410 static void inode_tree_add(struct inode *inode)
3412 struct btrfs_root *root = BTRFS_I(inode)->root;
3413 struct btrfs_inode *entry;
3415 struct rb_node *parent;
3417 p = &root->inode_tree.rb_node;
3420 if (hlist_unhashed(&inode->i_hash))
3423 spin_lock(&root->inode_lock);
3426 entry = rb_entry(parent, struct btrfs_inode, rb_node);
3428 if (inode->i_ino < entry->vfs_inode.i_ino)
3429 p = &parent->rb_left;
3430 else if (inode->i_ino > entry->vfs_inode.i_ino)
3431 p = &parent->rb_right;
3433 WARN_ON(!(entry->vfs_inode.i_state &
3434 (I_WILL_FREE | I_FREEING | I_CLEAR)));
3435 rb_erase(parent, &root->inode_tree);
3436 RB_CLEAR_NODE(parent);
3437 spin_unlock(&root->inode_lock);
3441 rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3442 rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3443 spin_unlock(&root->inode_lock);
3446 static void inode_tree_del(struct inode *inode)
3448 struct btrfs_root *root = BTRFS_I(inode)->root;
3451 spin_lock(&root->inode_lock);
3452 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3453 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3454 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3455 empty = RB_EMPTY_ROOT(&root->inode_tree);
3457 spin_unlock(&root->inode_lock);
3459 if (empty && btrfs_root_refs(&root->root_item) == 0) {
3460 synchronize_srcu(&root->fs_info->subvol_srcu);
3461 spin_lock(&root->inode_lock);
3462 empty = RB_EMPTY_ROOT(&root->inode_tree);
3463 spin_unlock(&root->inode_lock);
3465 btrfs_add_dead_root(root);
3469 int btrfs_invalidate_inodes(struct btrfs_root *root)
3471 struct rb_node *node;
3472 struct rb_node *prev;
3473 struct btrfs_inode *entry;
3474 struct inode *inode;
3477 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
3479 spin_lock(&root->inode_lock);
3481 node = root->inode_tree.rb_node;
3485 entry = rb_entry(node, struct btrfs_inode, rb_node);
3487 if (objectid < entry->vfs_inode.i_ino)
3488 node = node->rb_left;
3489 else if (objectid > entry->vfs_inode.i_ino)
3490 node = node->rb_right;
3496 entry = rb_entry(prev, struct btrfs_inode, rb_node);
3497 if (objectid <= entry->vfs_inode.i_ino) {
3501 prev = rb_next(prev);
3505 entry = rb_entry(node, struct btrfs_inode, rb_node);
3506 objectid = entry->vfs_inode.i_ino + 1;
3507 inode = igrab(&entry->vfs_inode);
3509 spin_unlock(&root->inode_lock);
3510 if (atomic_read(&inode->i_count) > 1)
3511 d_prune_aliases(inode);
3513 * btrfs_drop_inode will remove it from
3514 * the inode cache when its usage count hits zero.
3519 spin_lock(&root->inode_lock);
3523 if (cond_resched_lock(&root->inode_lock))
3526 node = rb_next(node);
3528 spin_unlock(&root->inode_lock);
3532 static noinline void init_btrfs_i(struct inode *inode)
3534 struct btrfs_inode *bi = BTRFS_I(inode);
3539 bi->last_sub_trans = 0;
3540 bi->logged_trans = 0;
3541 bi->delalloc_bytes = 0;
3542 bi->reserved_bytes = 0;
3543 bi->disk_i_size = 0;
3545 bi->index_cnt = (u64)-1;
3546 bi->last_unlink_trans = 0;
3547 bi->ordered_data_close = 0;
3548 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3549 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3550 inode->i_mapping, GFP_NOFS);
3551 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3552 inode->i_mapping, GFP_NOFS);
3553 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3554 INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3555 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3556 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3557 mutex_init(&BTRFS_I(inode)->extent_mutex);
3558 mutex_init(&BTRFS_I(inode)->log_mutex);
3561 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3563 struct btrfs_iget_args *args = p;
3564 inode->i_ino = args->ino;
3565 init_btrfs_i(inode);
3566 BTRFS_I(inode)->root = args->root;
3567 btrfs_set_inode_space_info(args->root, inode);
3571 static int btrfs_find_actor(struct inode *inode, void *opaque)
3573 struct btrfs_iget_args *args = opaque;
3574 return args->ino == inode->i_ino &&
3575 args->root == BTRFS_I(inode)->root;
3578 static struct inode *btrfs_iget_locked(struct super_block *s,
3580 struct btrfs_root *root)
3582 struct inode *inode;
3583 struct btrfs_iget_args args;
3584 args.ino = objectid;
3587 inode = iget5_locked(s, objectid, btrfs_find_actor,
3588 btrfs_init_locked_inode,
3593 /* Get an inode object given its location and corresponding root.
3594 * Returns in *is_new if the inode was read from disk
3596 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3597 struct btrfs_root *root)
3599 struct inode *inode;
3601 inode = btrfs_iget_locked(s, location->objectid, root);
3603 return ERR_PTR(-ENOMEM);
3605 if (inode->i_state & I_NEW) {
3606 BTRFS_I(inode)->root = root;
3607 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3608 btrfs_read_locked_inode(inode);
3610 inode_tree_add(inode);
3611 unlock_new_inode(inode);
3617 static struct inode *new_simple_dir(struct super_block *s,
3618 struct btrfs_key *key,
3619 struct btrfs_root *root)
3621 struct inode *inode = new_inode(s);
3624 return ERR_PTR(-ENOMEM);
3626 init_btrfs_i(inode);
3628 BTRFS_I(inode)->root = root;
3629 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
3630 BTRFS_I(inode)->dummy_inode = 1;
3632 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
3633 inode->i_op = &simple_dir_inode_operations;
3634 inode->i_fop = &simple_dir_operations;
3635 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
3636 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3641 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3643 struct inode *inode;
3644 struct btrfs_root *root = BTRFS_I(dir)->root;
3645 struct btrfs_root *sub_root = root;
3646 struct btrfs_key location;
3650 dentry->d_op = &btrfs_dentry_operations;
3652 if (dentry->d_name.len > BTRFS_NAME_LEN)
3653 return ERR_PTR(-ENAMETOOLONG);
3655 ret = btrfs_inode_by_name(dir, dentry, &location);
3658 return ERR_PTR(ret);
3660 if (location.objectid == 0)
3663 if (location.type == BTRFS_INODE_ITEM_KEY) {
3664 inode = btrfs_iget(dir->i_sb, &location, root);
3668 BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
3670 index = srcu_read_lock(&root->fs_info->subvol_srcu);
3671 ret = fixup_tree_root_location(root, dir, dentry,
3672 &location, &sub_root);
3675 inode = ERR_PTR(ret);
3677 inode = new_simple_dir(dir->i_sb, &location, sub_root);
3679 inode = btrfs_iget(dir->i_sb, &location, sub_root);
3681 srcu_read_unlock(&root->fs_info->subvol_srcu, index);
3686 static int btrfs_dentry_delete(struct dentry *dentry)
3688 struct btrfs_root *root;
3690 if (!dentry->d_inode && !IS_ROOT(dentry))
3691 dentry = dentry->d_parent;
3693 if (dentry->d_inode) {
3694 root = BTRFS_I(dentry->d_inode)->root;
3695 if (btrfs_root_refs(&root->root_item) == 0)
3701 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3702 struct nameidata *nd)
3704 struct inode *inode;
3706 inode = btrfs_lookup_dentry(dir, dentry);
3708 return ERR_CAST(inode);
3710 return d_splice_alias(inode, dentry);
3713 static unsigned char btrfs_filetype_table[] = {
3714 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3717 static int btrfs_real_readdir(struct file *filp, void *dirent,
3720 struct inode *inode = filp->f_dentry->d_inode;
3721 struct btrfs_root *root = BTRFS_I(inode)->root;
3722 struct btrfs_item *item;
3723 struct btrfs_dir_item *di;
3724 struct btrfs_key key;
3725 struct btrfs_key found_key;
3726 struct btrfs_path *path;
3729 struct extent_buffer *leaf;
3732 unsigned char d_type;
3737 int key_type = BTRFS_DIR_INDEX_KEY;
3742 /* FIXME, use a real flag for deciding about the key type */
3743 if (root->fs_info->tree_root == root)
3744 key_type = BTRFS_DIR_ITEM_KEY;
3746 /* special case for "." */
3747 if (filp->f_pos == 0) {
3748 over = filldir(dirent, ".", 1,
3755 /* special case for .., just use the back ref */
3756 if (filp->f_pos == 1) {
3757 u64 pino = parent_ino(filp->f_path.dentry);
3758 over = filldir(dirent, "..", 2,
3764 path = btrfs_alloc_path();
3767 btrfs_set_key_type(&key, key_type);
3768 key.offset = filp->f_pos;
3769 key.objectid = inode->i_ino;
3771 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3777 leaf = path->nodes[0];
3778 nritems = btrfs_header_nritems(leaf);
3779 slot = path->slots[0];
3780 if (advance || slot >= nritems) {
3781 if (slot >= nritems - 1) {
3782 ret = btrfs_next_leaf(root, path);
3785 leaf = path->nodes[0];
3786 nritems = btrfs_header_nritems(leaf);
3787 slot = path->slots[0];
3795 item = btrfs_item_nr(leaf, slot);
3796 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3798 if (found_key.objectid != key.objectid)
3800 if (btrfs_key_type(&found_key) != key_type)
3802 if (found_key.offset < filp->f_pos)
3805 filp->f_pos = found_key.offset;
3807 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3809 di_total = btrfs_item_size(leaf, item);
3811 while (di_cur < di_total) {
3812 struct btrfs_key location;
3814 name_len = btrfs_dir_name_len(leaf, di);
3815 if (name_len <= sizeof(tmp_name)) {
3816 name_ptr = tmp_name;
3818 name_ptr = kmalloc(name_len, GFP_NOFS);
3824 read_extent_buffer(leaf, name_ptr,
3825 (unsigned long)(di + 1), name_len);
3827 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3828 btrfs_dir_item_key_to_cpu(leaf, di, &location);
3830 /* is this a reference to our own snapshot? If so, skip it */
3833 if (location.type == BTRFS_ROOT_ITEM_KEY &&
3834 location.objectid == root->root_key.objectid) {
3838 over = filldir(dirent, name_ptr, name_len,
3839 found_key.offset, location.objectid,
3843 if (name_ptr != tmp_name)
3848 di_len = btrfs_dir_name_len(leaf, di) +
3849 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3851 di = (struct btrfs_dir_item *)((char *)di + di_len);
3855 /* Reached end of directory/root. Bump pos past the last item. */
3856 if (key_type == BTRFS_DIR_INDEX_KEY)
3857 filp->f_pos = INT_LIMIT(off_t);
3863 btrfs_free_path(path);
3867 int btrfs_write_inode(struct inode *inode, int wait)
3869 struct btrfs_root *root = BTRFS_I(inode)->root;
3870 struct btrfs_trans_handle *trans;
3873 if (root->fs_info->btree_inode == inode)
3877 trans = btrfs_join_transaction(root, 1);
3878 btrfs_set_trans_block_group(trans, inode);
3879 ret = btrfs_commit_transaction(trans, root);
3885 * This is somewhat expensive, updating the tree every time the
3886 * inode changes. But, it is most likely to find the inode in cache.
3887 * FIXME, needs more benchmarking... there are no reasons other than performance
3888 * to keep or drop this code.
3890 void btrfs_dirty_inode(struct inode *inode)
3892 struct btrfs_root *root = BTRFS_I(inode)->root;
3893 struct btrfs_trans_handle *trans;
3895 trans = btrfs_join_transaction(root, 1);
3896 btrfs_set_trans_block_group(trans, inode);
3897 btrfs_update_inode(trans, root, inode);
3898 btrfs_end_transaction(trans, root);
3902 * find the highest existing sequence number in a directory
3903 * and then set the in-memory index_cnt variable to reflect
3904 * free sequence numbers
3906 static int btrfs_set_inode_index_count(struct inode *inode)
3908 struct btrfs_root *root = BTRFS_I(inode)->root;
3909 struct btrfs_key key, found_key;
3910 struct btrfs_path *path;
3911 struct extent_buffer *leaf;
3914 key.objectid = inode->i_ino;
3915 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3916 key.offset = (u64)-1;
3918 path = btrfs_alloc_path();
3922 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3925 /* FIXME: we should be able to handle this */
3931 * MAGIC NUMBER EXPLANATION:
3932 * since we search a directory based on f_pos we have to start at 2
3933 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
3934 * else has to start at 2
3936 if (path->slots[0] == 0) {
3937 BTRFS_I(inode)->index_cnt = 2;
3943 leaf = path->nodes[0];
3944 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3946 if (found_key.objectid != inode->i_ino ||
3947 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
3948 BTRFS_I(inode)->index_cnt = 2;
3952 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
3954 btrfs_free_path(path);
3959 * helper to find a free sequence number in a given directory. This current
3960 * code is very simple, later versions will do smarter things in the btree
3962 int btrfs_set_inode_index(struct inode *dir, u64 *index)
3966 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
3967 ret = btrfs_set_inode_index_count(dir);
3972 *index = BTRFS_I(dir)->index_cnt;
3973 BTRFS_I(dir)->index_cnt++;
3978 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3979 struct btrfs_root *root,
3981 const char *name, int name_len,
3982 u64 ref_objectid, u64 objectid,
3983 u64 alloc_hint, int mode, u64 *index)
3985 struct inode *inode;
3986 struct btrfs_inode_item *inode_item;
3987 struct btrfs_key *location;
3988 struct btrfs_path *path;
3989 struct btrfs_inode_ref *ref;
3990 struct btrfs_key key[2];
3996 path = btrfs_alloc_path();
3999 inode = new_inode(root->fs_info->sb);
4001 return ERR_PTR(-ENOMEM);
4004 ret = btrfs_set_inode_index(dir, index);
4007 return ERR_PTR(ret);
4011 * index_cnt is ignored for everything but a dir,
4012 * btrfs_set_inode_index_count has an explanation for the magic
4015 init_btrfs_i(inode);
4016 BTRFS_I(inode)->index_cnt = 2;
4017 BTRFS_I(inode)->root = root;
4018 BTRFS_I(inode)->generation = trans->transid;
4019 btrfs_set_inode_space_info(root, inode);
4025 BTRFS_I(inode)->block_group =
4026 btrfs_find_block_group(root, 0, alloc_hint, owner);
4028 key[0].objectid = objectid;
4029 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
4032 key[1].objectid = objectid;
4033 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
4034 key[1].offset = ref_objectid;
4036 sizes[0] = sizeof(struct btrfs_inode_item);
4037 sizes[1] = name_len + sizeof(*ref);
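/*
 * the inode item and its back reference (which records the name and the
 * index in the parent directory) are inserted in one batched btree
 * insertion using the two keys set up above.
 */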
4039 path->leave_spinning = 1;
4040 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
4044 inode->i_uid = current_fsuid();
4046 if (dir && (dir->i_mode & S_ISGID)) {
4047 inode->i_gid = dir->i_gid;
4051 inode->i_gid = current_fsgid();
4053 inode->i_mode = mode;
4054 inode->i_ino = objectid;
4055 inode_set_bytes(inode, 0);
4056 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4057 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4058 struct btrfs_inode_item);
4059 fill_inode_item(trans, path->nodes[0], inode_item, inode);
4061 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
4062 struct btrfs_inode_ref);
4063 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4064 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4065 ptr = (unsigned long)(ref + 1);
4066 write_extent_buffer(path->nodes[0], name, ptr, name_len);
4068 btrfs_mark_buffer_dirty(path->nodes[0]);
4069 btrfs_free_path(path);
4071 location = &BTRFS_I(inode)->location;
4072 location->objectid = objectid;
4073 location->offset = 0;
4074 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
4076 btrfs_inherit_iflags(inode, dir);
4078 if ((mode & S_IFREG)) {
4079 if (btrfs_test_opt(root, NODATASUM))
4080 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4081 if (btrfs_test_opt(root, NODATACOW))
4082 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4085 insert_inode_hash(inode);
4086 inode_tree_add(inode);
4090 BTRFS_I(dir)->index_cnt--;
4091 btrfs_free_path(path);
4093 return ERR_PTR(ret);
4096 static inline u8 btrfs_inode_type(struct inode *inode)
4098 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
4102 * utility function to add 'inode' into 'parent_inode' with
4103 * a given name and a given sequence number.
4104 * if 'add_backref' is true, also insert a backref from the
4105 * inode to the parent directory.
4107 int btrfs_add_link(struct btrfs_trans_handle *trans,
4108 struct inode *parent_inode, struct inode *inode,
4109 const char *name, int name_len, int add_backref, u64 index)
4112 struct btrfs_key key;
4113 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4115 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4116 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4118 key.objectid = inode->i_ino;
4119 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4123 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4124 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4125 key.objectid, root->root_key.objectid,
4126 parent_inode->i_ino,
4127 index, name, name_len);
4128 } else if (add_backref) {
4129 ret = btrfs_insert_inode_ref(trans, root,
4130 name, name_len, inode->i_ino,
4131 parent_inode->i_ino, index);
4135 ret = btrfs_insert_dir_item(trans, root, name, name_len,
4136 parent_inode->i_ino, &key,
4137 btrfs_inode_type(inode), index);
4140 btrfs_i_size_write(parent_inode, parent_inode->i_size +
4142 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4143 ret = btrfs_update_inode(trans, root, parent_inode);
4148 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4149 struct dentry *dentry, struct inode *inode,
4150 int backref, u64 index)
4152 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4153 inode, dentry->d_name.name,
4154 dentry->d_name.len, backref, index);
4156 d_instantiate(dentry, inode);
4164 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4165 int mode, dev_t rdev)
4167 struct btrfs_trans_handle *trans;
4168 struct btrfs_root *root = BTRFS_I(dir)->root;
4169 struct inode *inode = NULL;
4173 unsigned long nr = 0;
4176 if (!new_valid_dev(rdev))
4180 * 2 for inode item and ref, 2 for dir items
4182 * 1 for xattr if selinux is on
4184 err = btrfs_reserve_metadata_space(root, 5);
4188 trans = btrfs_start_transaction(root, 1);
4191 btrfs_set_trans_block_group(trans, dir);
4193 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4199 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4201 dentry->d_parent->d_inode->i_ino, objectid,
4202 BTRFS_I(dir)->block_group, mode, &index);
4203 err = PTR_ERR(inode);
4207 err = btrfs_init_inode_security(inode, dir);
4213 btrfs_set_trans_block_group(trans, inode);
4214 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4218 inode->i_op = &btrfs_special_inode_operations;
4219 init_special_inode(inode, inode->i_mode, rdev);
4220 btrfs_update_inode(trans, root, inode);
4222 btrfs_update_inode_block_group(trans, inode);
4223 btrfs_update_inode_block_group(trans, dir);
4225 nr = trans->blocks_used;
4226 btrfs_end_transaction_throttle(trans, root);
4228 btrfs_unreserve_metadata_space(root, 5);
4230 inode_dec_link_count(inode);
4233 btrfs_btree_balance_dirty(root, nr);
4237 static int btrfs_create(struct inode *dir, struct dentry *dentry,
4238 int mode, struct nameidata *nd)
4240 struct btrfs_trans_handle *trans;
4241 struct btrfs_root *root = BTRFS_I(dir)->root;
4242 struct inode *inode = NULL;
4245 unsigned long nr = 0;
4250 * 2 for inode item and ref, 2 for dir items
4252 * 1 for xattr if selinux is on
4254 err = btrfs_reserve_metadata_space(root, 5);
4258 trans = btrfs_start_transaction(root, 1);
4261 btrfs_set_trans_block_group(trans, dir);
4263 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4269 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4271 dentry->d_parent->d_inode->i_ino,
4272 objectid, BTRFS_I(dir)->block_group, mode,
4274 err = PTR_ERR(inode);
4278 err = btrfs_init_inode_security(inode, dir);
4284 btrfs_set_trans_block_group(trans, inode);
4285 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4289 inode->i_mapping->a_ops = &btrfs_aops;
4290 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4291 inode->i_fop = &btrfs_file_operations;
4292 inode->i_op = &btrfs_file_inode_operations;
4293 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4295 btrfs_update_inode_block_group(trans, inode);
4296 btrfs_update_inode_block_group(trans, dir);
4298 nr = trans->blocks_used;
4299 btrfs_end_transaction_throttle(trans, root);
4301 btrfs_unreserve_metadata_space(root, 5);
4303 inode_dec_link_count(inode);
4306 btrfs_btree_balance_dirty(root, nr);
4310 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4311 struct dentry *dentry)
4313 struct btrfs_trans_handle *trans;
4314 struct btrfs_root *root = BTRFS_I(dir)->root;
4315 struct inode *inode = old_dentry->d_inode;
4317 unsigned long nr = 0;
4321 if (inode->i_nlink == 0)
4325 * 1 item for inode ref
4326 * 2 items for dir items
4328 err = btrfs_reserve_metadata_space(root, 3);
4332 btrfs_inc_nlink(inode);
4334 err = btrfs_set_inode_index(dir, &index);
4338 trans = btrfs_start_transaction(root, 1);
4340 btrfs_set_trans_block_group(trans, dir);
4341 atomic_inc(&inode->i_count);
4343 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
4348 btrfs_update_inode_block_group(trans, dir);
4349 err = btrfs_update_inode(trans, root, inode);
4351 btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
4354 nr = trans->blocks_used;
4355 btrfs_end_transaction_throttle(trans, root);
4357 btrfs_unreserve_metadata_space(root, 3);
4359 inode_dec_link_count(inode);
4362 btrfs_btree_balance_dirty(root, nr);
4366 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4368 struct inode *inode = NULL;
4369 struct btrfs_trans_handle *trans;
4370 struct btrfs_root *root = BTRFS_I(dir)->root;
4372 int drop_on_err = 0;
4375 unsigned long nr = 1;
4378 * 2 items for inode and ref
4379 * 2 items for dir items
4380 * 1 for xattr if selinux is on
4382 err = btrfs_reserve_metadata_space(root, 5);
4386 trans = btrfs_start_transaction(root, 1);
4391 btrfs_set_trans_block_group(trans, dir);
4393 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4399 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4401 dentry->d_parent->d_inode->i_ino, objectid,
4402 BTRFS_I(dir)->block_group, S_IFDIR | mode,
4404 if (IS_ERR(inode)) {
4405 err = PTR_ERR(inode);
4411 err = btrfs_init_inode_security(inode, dir);
4415 inode->i_op = &btrfs_dir_inode_operations;
4416 inode->i_fop = &btrfs_dir_file_operations;
4417 btrfs_set_trans_block_group(trans, inode);
4419 btrfs_i_size_write(inode, 0);
4420 err = btrfs_update_inode(trans, root, inode);
4424 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4425 inode, dentry->d_name.name,
4426 dentry->d_name.len, 0, index);
4430 d_instantiate(dentry, inode);
4432 btrfs_update_inode_block_group(trans, inode);
4433 btrfs_update_inode_block_group(trans, dir);
4436 nr = trans->blocks_used;
4437 btrfs_end_transaction_throttle(trans, root);
4440 btrfs_unreserve_metadata_space(root, 5);
4443 btrfs_btree_balance_dirty(root, nr);
4447 /* helper for btrfs_get_extent. Given an existing extent in the tree,
4448 * and an extent that you want to insert, deal with overlap and insert
4449 * the new extent into the tree.
4451 static int merge_extent_mapping(struct extent_map_tree *em_tree,
4452 struct extent_map *existing,
4453 struct extent_map *em,
4454 u64 map_start, u64 map_len)
4458 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
4459 start_diff = map_start - em->start;
4460 em->start = map_start;
4462 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
4463 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
4464 em->block_start += start_diff;
4465 em->block_len -= start_diff;
4467 return add_extent_mapping(em_tree, em);
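/*
 * copy a compressed inline extent out of the leaf and zlib-decompress it
 * straight into the destination page; on decompression failure the
 * destination range is zeroed instead.
 */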
4470 static noinline int uncompress_inline(struct btrfs_path *path,
4471 struct inode *inode, struct page *page,
4472 size_t pg_offset, u64 extent_offset,
4473 struct btrfs_file_extent_item *item)
4476 struct extent_buffer *leaf = path->nodes[0];
4479 unsigned long inline_size;
4482 WARN_ON(pg_offset != 0);
4483 max_size = btrfs_file_extent_ram_bytes(leaf, item);
4484 inline_size = btrfs_file_extent_inline_item_len(leaf,
4485 btrfs_item_nr(leaf, path->slots[0]));
4486 tmp = kmalloc(inline_size, GFP_NOFS);
4487 ptr = btrfs_file_extent_inline_start(item);
4489 read_extent_buffer(leaf, tmp, ptr, inline_size);
4491 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
4492 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
4493 inline_size, max_size);
4495 char *kaddr = kmap_atomic(page, KM_USER0);
4496 unsigned long copy_size = min_t(u64,
4497 PAGE_CACHE_SIZE - pg_offset,
4498 max_size - extent_offset);
4499 memset(kaddr + pg_offset, 0, copy_size);
4500 kunmap_atomic(kaddr, KM_USER0);
4507 * a bit scary, this does extent mapping from logical file offset to the disk.
4508 * the ugly parts come from merging extents from the disk with the in-ram
4509 * representation. This gets more complex because of the data=ordered code,
4510 * where the in-ram extents might be locked pending data=ordered completion.
4512 * This also copies inline extents directly into the page.
4515 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
4516 size_t pg_offset, u64 start, u64 len,
4522 u64 extent_start = 0;
4524 u64 objectid = inode->i_ino;
4526 struct btrfs_path *path = NULL;
4527 struct btrfs_root *root = BTRFS_I(inode)->root;
4528 struct btrfs_file_extent_item *item;
4529 struct extent_buffer *leaf;
4530 struct btrfs_key found_key;
4531 struct extent_map *em = NULL;
4532 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4533 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4534 struct btrfs_trans_handle *trans = NULL;
4538 read_lock(&em_tree->lock);
4539 em = lookup_extent_mapping(em_tree, start, len);
4541 em->bdev = root->fs_info->fs_devices->latest_bdev;
4542 read_unlock(&em_tree->lock);
4545 if (em->start > start || em->start + em->len <= start)
4546 free_extent_map(em);
4547 else if (em->block_start == EXTENT_MAP_INLINE && page)
4548 free_extent_map(em);
4552 em = alloc_extent_map(GFP_NOFS);
4557 em->bdev = root->fs_info->fs_devices->latest_bdev;
4558 em->start = EXTENT_MAP_HOLE;
4559 em->orig_start = EXTENT_MAP_HOLE;
4561 em->block_len = (u64)-1;
4564 path = btrfs_alloc_path();
4568 ret = btrfs_lookup_file_extent(trans, root, path,
4569 objectid, start, trans != NULL);
4576 if (path->slots[0] == 0)
4581 leaf = path->nodes[0];
4582 item = btrfs_item_ptr(leaf, path->slots[0],
4583 struct btrfs_file_extent_item);
4584 /* are we inside the extent that was found? */
4585 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4586 found_type = btrfs_key_type(&found_key);
4587 if (found_key.objectid != objectid ||
4588 found_type != BTRFS_EXTENT_DATA_KEY) {
4592 found_type = btrfs_file_extent_type(leaf, item);
4593 extent_start = found_key.offset;
4594 compressed = btrfs_file_extent_compression(leaf, item);
4595 if (found_type == BTRFS_FILE_EXTENT_REG ||
4596 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4597 extent_end = extent_start +
4598 btrfs_file_extent_num_bytes(leaf, item);
4599 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4601 size = btrfs_file_extent_inline_len(leaf, item);
4602 extent_end = (extent_start + size + root->sectorsize - 1) &
4603 ~((u64)root->sectorsize - 1);
4606 if (start >= extent_end) {
4608 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4609 ret = btrfs_next_leaf(root, path);
4616 leaf = path->nodes[0];
4618 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4619 if (found_key.objectid != objectid ||
4620 found_key.type != BTRFS_EXTENT_DATA_KEY)
4622 if (start + len <= found_key.offset)
4625 em->len = found_key.offset - start;
4629 if (found_type == BTRFS_FILE_EXTENT_REG ||
4630 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4631 em->start = extent_start;
4632 em->len = extent_end - extent_start;
4633 em->orig_start = extent_start -
4634 btrfs_file_extent_offset(leaf, item);
4635 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4637 em->block_start = EXTENT_MAP_HOLE;
4641 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4642 em->block_start = bytenr;
4643 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4646 bytenr += btrfs_file_extent_offset(leaf, item);
4647 em->block_start = bytenr;
4648 em->block_len = em->len;
4649 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4650 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4653 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4657 size_t extent_offset;
4660 em->block_start = EXTENT_MAP_INLINE;
4661 if (!page || create) {
4662 em->start = extent_start;
4663 em->len = extent_end - extent_start;
4667 size = btrfs_file_extent_inline_len(leaf, item);
4668 extent_offset = page_offset(page) + pg_offset - extent_start;
4669 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4670 size - extent_offset);
4671 em->start = extent_start + extent_offset;
4672 em->len = (copy_size + root->sectorsize - 1) &
4673 ~((u64)root->sectorsize - 1);
4674 em->orig_start = EXTENT_MAP_INLINE;
4676 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4677 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4678 if (create == 0 && !PageUptodate(page)) {
4679 if (btrfs_file_extent_compression(leaf, item) ==
4680 BTRFS_COMPRESS_ZLIB) {
4681 ret = uncompress_inline(path, inode, page,
4683 extent_offset, item);
4687 read_extent_buffer(leaf, map + pg_offset, ptr,
4689 if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
4690 memset(map + pg_offset + copy_size, 0,
4691 PAGE_CACHE_SIZE - pg_offset -
4696 flush_dcache_page(page);
4697 } else if (create && PageUptodate(page)) {
4700 free_extent_map(em);
4702 btrfs_release_path(root, path);
4703 trans = btrfs_join_transaction(root, 1);
4707 write_extent_buffer(leaf, map + pg_offset, ptr,
4710 btrfs_mark_buffer_dirty(leaf);
4712 set_extent_uptodate(io_tree, em->start,
4713 extent_map_end(em) - 1, GFP_NOFS);
4716 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4723 em->block_start = EXTENT_MAP_HOLE;
4724 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4726 btrfs_release_path(root, path);
4727 if (em->start > start || extent_map_end(em) <= start) {
4728 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4729 "[%llu %llu]\n", (unsigned long long)em->start,
4730 (unsigned long long)em->len,
4731 (unsigned long long)start,
4732 (unsigned long long)len);
4738 write_lock(&em_tree->lock);
4739 ret = add_extent_mapping(em_tree, em);
4740 /* it is possible that someone inserted the extent into the tree
4741 * while we had the lock dropped. It is also possible that
4742 * an overlapping map exists in the tree
4744 if (ret == -EEXIST) {
4745 struct extent_map *existing;
4749 existing = lookup_extent_mapping(em_tree, start, len);
4750 if (existing && (existing->start > start ||
4751 existing->start + existing->len <= start)) {
4752 free_extent_map(existing);
4756 existing = lookup_extent_mapping(em_tree, em->start,
4759 err = merge_extent_mapping(em_tree, existing,
4762 free_extent_map(existing);
4764 free_extent_map(em);
4769 free_extent_map(em);
4773 free_extent_map(em);
4778 write_unlock(&em_tree->lock);
4781 btrfs_free_path(path);
4783 ret = btrfs_end_transaction(trans, root);
4788 free_extent_map(em);
4789 return ERR_PTR(err);
4794 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4795 const struct iovec *iov, loff_t offset,
4796 unsigned long nr_segs)
4801 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4802 __u64 start, __u64 len)
4804 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4807 int btrfs_readpage(struct file *file, struct page *page)
4809 struct extent_io_tree *tree;
4810 tree = &BTRFS_I(page->mapping->host)->io_tree;
4811 return extent_read_full_page(tree, page, btrfs_get_extent);
4814 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4816 struct extent_io_tree *tree;
4819 if (current->flags & PF_MEMALLOC) {
4820 redirty_page_for_writepage(wbc, page);
4824 tree = &BTRFS_I(page->mapping->host)->io_tree;
4825 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4828 int btrfs_writepages(struct address_space *mapping,
4829 struct writeback_control *wbc)
4831 struct extent_io_tree *tree;
4833 tree = &BTRFS_I(mapping->host)->io_tree;
4834 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
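/*
 * Note: btrfs_readpage(), btrfs_writepage(), btrfs_writepages() and
 * btrfs_readpages() below are thin wrappers; they hand the inode's
 * extent_io_tree (and btrfs_get_extent() as the extent mapping
 * callback) to the generic extent_io helpers, which do the actual
 * page I/O.
 */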
4838 btrfs_readpages(struct file *file, struct address_space *mapping,
4839 struct list_head *pages, unsigned nr_pages)
4841 struct extent_io_tree *tree;
4842 tree = &BTRFS_I(mapping->host)->io_tree;
4843 return extent_readpages(tree, mapping, pages, nr_pages,
4846 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4848 struct extent_io_tree *tree;
4849 struct extent_map_tree *map;
4852 tree = &BTRFS_I(page->mapping->host)->io_tree;
4853 map = &BTRFS_I(page->mapping->host)->extent_tree;
4854 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4856 ClearPagePrivate(page);
4857 set_page_private(page, 0);
4858 page_cache_release(page);
4863 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4865 if (PageWriteback(page) || PageDirty(page))
4867 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
4870 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4872 struct extent_io_tree *tree;
4873 struct btrfs_ordered_extent *ordered;
4874 u64 page_start = page_offset(page);
4875 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4879 * we have the page locked, so new writeback can't start,
4880 * and the dirty bit won't be cleared while we are here.
4882 * Wait for IO on this page so that we can safely clear
4883 * the PagePrivate2 bit and do ordered accounting
4885 wait_on_page_writeback(page);
4887 tree = &BTRFS_I(page->mapping->host)->io_tree;
4889 btrfs_releasepage(page, GFP_NOFS);
4892 lock_extent(tree, page_start, page_end, GFP_NOFS);
4893 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
4897 * IO on this page will never be started, so we need
4898 * to account for any ordered extents now
4900 clear_extent_bit(tree, page_start, page_end,
4901 EXTENT_DIRTY | EXTENT_DELALLOC |
4902 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
4905 * whoever cleared the private bit is responsible
4906 * for the finish_ordered_io
4908 if (TestClearPagePrivate2(page)) {
4909 btrfs_finish_ordered_io(page->mapping->host,
4910 page_start, page_end);
4912 btrfs_put_ordered_extent(ordered);
4913 lock_extent(tree, page_start, page_end, GFP_NOFS);
4915 clear_extent_bit(tree, page_start, page_end,
4916 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4917 EXTENT_DO_ACCOUNTING, 1, 1, NULL, GFP_NOFS);
4918 __btrfs_releasepage(page, GFP_NOFS);
4920 ClearPageChecked(page);
4921 if (PagePrivate(page)) {
4922 ClearPagePrivate(page);
4923 set_page_private(page, 0);
4924 page_cache_release(page);
4929 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
4930 * called from a page fault handler when a page is first dirtied. Hence we must
4931 * be careful to check for EOF conditions here. We set the page up correctly
4932 * for a written page which means we get ENOSPC checking when writing into
4933 * holes and correct delalloc and unwritten extent mapping on filesystems that
4934 * support these features.
4936 * We are not allowed to take the i_mutex here so we have to play games to
4937 * protect against truncate races as the page could now be beyond EOF. Because
4938 * vmtruncate() writes the inode size before removing pages, once we have the
4939 * page lock we can determine safely if the page is beyond EOF. If it is not
4940 * beyond EOF, then the page is guaranteed safe against truncation until we unlock the page.
4943 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4945 struct page *page = vmf->page;
4946 struct inode *inode = fdentry(vma->vm_file)->d_inode;
4947 struct btrfs_root *root = BTRFS_I(inode)->root;
4948 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4949 struct btrfs_ordered_extent *ordered;
4951 unsigned long zero_start;
4957 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
4961 else /* -ENOSPC, -EIO, etc */
4962 ret = VM_FAULT_SIGBUS;
4966 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
4968 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4969 ret = VM_FAULT_SIGBUS;
4973 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
4976 size = i_size_read(inode);
4977 page_start = page_offset(page);
4978 page_end = page_start + PAGE_CACHE_SIZE - 1;
4980 if ((page->mapping != inode->i_mapping) ||
4981 (page_start >= size)) {
4982 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4983 /* page got truncated out from underneath us */
4986 wait_on_page_writeback(page);
4988 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4989 set_page_extent_mapped(page);
4992 * we can't set the delalloc bits if there are pending ordered
4993 * extents. Drop our locks and wait for them to finish
4995 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4997 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4999 btrfs_start_ordered_extent(inode, ordered, 1);
5000 btrfs_put_ordered_extent(ordered);
5005 * XXX - page_mkwrite gets called every time the page is dirtied, even
5006 * if it was already dirty, so for space accounting reasons we need to
5007 * clear any delalloc bits for the range we are fixing to save. There
5008 * is probably a better way to do this, but for now keep consistent with
5009 * prepare_pages in the normal write path.
5011 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
5012 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
5015 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
5017 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5018 ret = VM_FAULT_SIGBUS;
5019 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5024 /* page is wholly or partially inside EOF */
5025 if (page_start + PAGE_CACHE_SIZE > size)
5026 zero_start = size & ~PAGE_CACHE_MASK;
5028 zero_start = PAGE_CACHE_SIZE;
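/*
 * Worked example (assuming 4K pages): with i_size = 10000 the page at
 * offset 8192 straddles EOF, so zero_start = 10000 & (PAGE_CACHE_SIZE - 1)
 * = 1808 and bytes 1808..4095 of that page are zeroed below. Pages fully
 * inside EOF keep zero_start == PAGE_CACHE_SIZE and skip the memset.
 */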
5030 if (zero_start != PAGE_CACHE_SIZE) {
5032 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
5033 flush_dcache_page(page);
5036 ClearPageChecked(page);
5037 set_page_dirty(page);
5038 SetPageUptodate(page);
5040 BTRFS_I(inode)->last_trans = root->fs_info->generation;
5041 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
5043 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5046 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
5048 return VM_FAULT_LOCKED;
5054 static void btrfs_truncate(struct inode *inode)
5056 struct btrfs_root *root = BTRFS_I(inode)->root;
5058 struct btrfs_trans_handle *trans;
5060 u64 mask = root->sectorsize - 1;
5062 if (!S_ISREG(inode->i_mode))
5064 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
5067 ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
5070 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
5072 trans = btrfs_start_transaction(root, 1);
5075 * setattr is responsible for setting the ordered_data_close flag,
5076 * but that is only tested during the last file release. That
5077 * could happen well after the next commit, leaving a great big
5078 * window where new writes may get lost if someone chooses to write
5079 * to this file after truncating to zero
5081 * The inode doesn't have any dirty data here, and so if we commit
5082 * this is a noop. If someone immediately starts writing to the inode
5083 * it is very likely we'll catch some of their writes in this
5084 * transaction, and the commit will find this file on the ordered
5085 * data list with good things to send down.
5087 * This is a best effort solution; there is still a window where
5088 * using truncate to replace the contents of the file will
5089 * end up with a zero length file after a crash.
5091 if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
5092 btrfs_add_ordered_operation(trans, root, inode);
5094 btrfs_set_trans_block_group(trans, inode);
5095 btrfs_i_size_write(inode, inode->i_size);
5097 ret = btrfs_orphan_add(trans, inode);
5100 /* FIXME, add redo link to tree so we don't leak on crash */
5101 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
5102 BTRFS_EXTENT_DATA_KEY);
5103 btrfs_update_inode(trans, root, inode);
5105 ret = btrfs_orphan_del(trans, inode);
5109 nr = trans->blocks_used;
5110 ret = btrfs_end_transaction_throttle(trans, root);
5112 btrfs_btree_balance_dirty(root, nr);
5116 * create a new subvolume directory/inode (helper for the ioctl).
5118 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
5119 struct btrfs_root *new_root,
5120 u64 new_dirid, u64 alloc_hint)
5122 struct inode *inode;
5126 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
5127 new_dirid, alloc_hint, S_IFDIR | 0700, &index);
5129 return PTR_ERR(inode);
5130 inode->i_op = &btrfs_dir_inode_operations;
5131 inode->i_fop = &btrfs_dir_file_operations;
5134 btrfs_i_size_write(inode, 0);
5136 err = btrfs_update_inode(trans, new_root, inode);
5143 /* helper function for file defrag and space balancing. This
5144 * forces readahead on a given range of bytes in an inode
5146 unsigned long btrfs_force_ra(struct address_space *mapping,
5147 struct file_ra_state *ra, struct file *file,
5148 pgoff_t offset, pgoff_t last_index)
5150 pgoff_t req_size = last_index - offset + 1;
5152 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
5153 return offset + req_size;
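/*
 * Usage sketch: btrfs_force_ra(mapping, ra, file, 0, 15) requests
 * synchronous readahead of pages 0..15 (req_size = 16) and returns 16,
 * the first index that was not part of the request.
 */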
5156 struct inode *btrfs_alloc_inode(struct super_block *sb)
5158 struct btrfs_inode *ei;
5160 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
5164 ei->last_sub_trans = 0;
5165 ei->logged_trans = 0;
5166 ei->outstanding_extents = 0;
5167 ei->reserved_extents = 0;
5168 spin_lock_init(&ei->accounting_lock);
5169 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
5170 INIT_LIST_HEAD(&ei->i_orphan);
5171 INIT_LIST_HEAD(&ei->ordered_operations);
5172 return &ei->vfs_inode;
5175 void btrfs_destroy_inode(struct inode *inode)
5177 struct btrfs_ordered_extent *ordered;
5178 struct btrfs_root *root = BTRFS_I(inode)->root;
5180 WARN_ON(!list_empty(&inode->i_dentry));
5181 WARN_ON(inode->i_data.nrpages);
5184 * Make sure we're properly removed from the ordered operation list.
5188 if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
5189 spin_lock(&root->fs_info->ordered_extent_lock);
5190 list_del_init(&BTRFS_I(inode)->ordered_operations);
5191 spin_unlock(&root->fs_info->ordered_extent_lock);
5194 spin_lock(&root->list_lock);
5195 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
5196 printk(KERN_ERR "BTRFS: inode %lu still on the orphan"
5197 " list\n", inode->i_ino);
5200 spin_unlock(&root->list_lock);
5203 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
5207 printk(KERN_ERR "btrfs found ordered "
5208 "extent %llu %llu on inode cleanup\n",
5209 (unsigned long long)ordered->file_offset,
5210 (unsigned long long)ordered->len);
5211 btrfs_remove_ordered_extent(inode, ordered);
5212 btrfs_put_ordered_extent(ordered);
5213 btrfs_put_ordered_extent(ordered);
5216 inode_tree_del(inode);
5217 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
5218 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
5221 void btrfs_drop_inode(struct inode *inode)
5223 struct btrfs_root *root = BTRFS_I(inode)->root;
5225 if (inode->i_nlink > 0 && btrfs_root_refs(&root->root_item) == 0)
5226 generic_delete_inode(inode);
5228 generic_drop_inode(inode);
5231 static void init_once(void *foo)
5233 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
5235 inode_init_once(&ei->vfs_inode);
5238 void btrfs_destroy_cachep(void)
5240 if (btrfs_inode_cachep)
5241 kmem_cache_destroy(btrfs_inode_cachep);
5242 if (btrfs_trans_handle_cachep)
5243 kmem_cache_destroy(btrfs_trans_handle_cachep);
5244 if (btrfs_transaction_cachep)
5245 kmem_cache_destroy(btrfs_transaction_cachep);
5246 if (btrfs_path_cachep)
5247 kmem_cache_destroy(btrfs_path_cachep);
5250 int btrfs_init_cachep(void)
5252 btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
5253 sizeof(struct btrfs_inode), 0,
5254 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
5255 if (!btrfs_inode_cachep)
5258 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
5259 sizeof(struct btrfs_trans_handle), 0,
5260 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5261 if (!btrfs_trans_handle_cachep)
5264 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
5265 sizeof(struct btrfs_transaction), 0,
5266 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5267 if (!btrfs_transaction_cachep)
5270 btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
5271 sizeof(struct btrfs_path), 0,
5272 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5273 if (!btrfs_path_cachep)
5278 btrfs_destroy_cachep();
5282 static int btrfs_getattr(struct vfsmount *mnt,
5283 struct dentry *dentry, struct kstat *stat)
5285 struct inode *inode = dentry->d_inode;
5286 generic_fillattr(inode, stat);
5287 stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
5288 stat->blksize = PAGE_CACHE_SIZE;
5289 stat->blocks = (inode_get_bytes(inode) +
5290 BTRFS_I(inode)->delalloc_bytes) >> 9;
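/*
 * stat->blocks is reported in 512-byte units (hence the >> 9), and it
 * includes BTRFS_I(inode)->delalloc_bytes, so buffered writes that have
 * not yet been flushed to disk still show up in the block count.
 */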
5294 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5295 struct inode *new_dir, struct dentry *new_dentry)
5297 struct btrfs_trans_handle *trans;
5298 struct btrfs_root *root = BTRFS_I(old_dir)->root;
5299 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
5300 struct inode *new_inode = new_dentry->d_inode;
5301 struct inode *old_inode = old_dentry->d_inode;
5302 struct timespec ctime = CURRENT_TIME;
5307 if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5310 /* we only allow rename subvolume link between subvolumes */
5311 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
5314 if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
5315 (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
5318 if (S_ISDIR(old_inode->i_mode) && new_inode &&
5319 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
5323 * We want to reserve the absolute worst case number of items. So if
5324 * both inodes are subvols and we need to unlink them then that would
5325 * require 4 item modifications, but if they are both normal inodes it
5326 * would require 5 item modifications, so we'll assume they're normal
5327 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
5328 * should cover the worst case number of items we'll modify.
5330 ret = btrfs_reserve_metadata_space(root, 11);
5335 * we're using rename to replace one file with another,
5336 * and the replacement file is large. Start IO on it now so
5337 * we don't add too much work to the end of the transaction
5339 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
5340 old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
5341 filemap_flush(old_inode->i_mapping);
5343 /* close the racy window with snapshot create/destroy ioctl */
5344 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5345 down_read(&root->fs_info->subvol_sem);
5347 trans = btrfs_start_transaction(root, 1);
5348 btrfs_set_trans_block_group(trans, new_dir);
5351 btrfs_record_root_in_trans(trans, dest);
5353 ret = btrfs_set_inode_index(new_dir, &index);
5357 if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5358 /* force full log commit if subvolume involved. */
5359 root->fs_info->last_trans_log_full_commit = trans->transid;
5361 ret = btrfs_insert_inode_ref(trans, dest,
5362 new_dentry->d_name.name,
5363 new_dentry->d_name.len,
5365 new_dir->i_ino, index);
5369 * this is an ugly little race, but the rename is required
5370 * to make sure that if we crash, the inode is either at the
5371 * old name or the new one. pinning the log transaction lets
5372 * us make sure we don't allow a log commit to come in after
5373 * we unlink the name but before we add the new name back in.
5375 btrfs_pin_log_trans(root);
5378 * make sure the inode gets flushed if it is replacing something.
5381 if (new_inode && new_inode->i_size &&
5382 old_inode && S_ISREG(old_inode->i_mode)) {
5383 btrfs_add_ordered_operation(trans, root, old_inode);
5386 old_dir->i_ctime = old_dir->i_mtime = ctime;
5387 new_dir->i_ctime = new_dir->i_mtime = ctime;
5388 old_inode->i_ctime = ctime;
5390 if (old_dentry->d_parent != new_dentry->d_parent)
5391 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
5393 if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5394 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
5395 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
5396 old_dentry->d_name.name,
5397 old_dentry->d_name.len);
5399 btrfs_inc_nlink(old_dentry->d_inode);
5400 ret = btrfs_unlink_inode(trans, root, old_dir,
5401 old_dentry->d_inode,
5402 old_dentry->d_name.name,
5403 old_dentry->d_name.len);
5408 new_inode->i_ctime = CURRENT_TIME;
5409 if (unlikely(new_inode->i_ino ==
5410 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
5411 root_objectid = BTRFS_I(new_inode)->location.objectid;
5412 ret = btrfs_unlink_subvol(trans, dest, new_dir,
5414 new_dentry->d_name.name,
5415 new_dentry->d_name.len);
5416 BUG_ON(new_inode->i_nlink == 0);
5418 ret = btrfs_unlink_inode(trans, dest, new_dir,
5419 new_dentry->d_inode,
5420 new_dentry->d_name.name,
5421 new_dentry->d_name.len);
5424 if (new_inode->i_nlink == 0) {
5425 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
5430 ret = btrfs_add_link(trans, new_dir, old_inode,
5431 new_dentry->d_name.name,
5432 new_dentry->d_name.len, 0, index);
5435 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
5436 btrfs_log_new_name(trans, old_inode, old_dir,
5437 new_dentry->d_parent);
5438 btrfs_end_log_trans(root);
5441 btrfs_end_transaction_throttle(trans, root);
5443 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5444 up_read(&root->fs_info->subvol_sem);
5446 btrfs_unreserve_metadata_space(root, 11);
5451 * some fairly slow code that needs optimization. This walks the list
5452 * of all the inodes with pending delalloc and forces them to disk.
5454 int btrfs_start_delalloc_inodes(struct btrfs_root *root)
5456 struct list_head *head = &root->fs_info->delalloc_inodes;
5457 struct btrfs_inode *binode;
5458 struct inode *inode;
5460 if (root->fs_info->sb->s_flags & MS_RDONLY)
5463 spin_lock(&root->fs_info->delalloc_lock);
5464 while (!list_empty(head)) {
5465 binode = list_entry(head->next, struct btrfs_inode,
5467 inode = igrab(&binode->vfs_inode);
5469 list_del_init(&binode->delalloc_inodes);
5470 spin_unlock(&root->fs_info->delalloc_lock);
5472 filemap_flush(inode->i_mapping);
5476 spin_lock(&root->fs_info->delalloc_lock);
5478 spin_unlock(&root->fs_info->delalloc_lock);
5480 /* the filemap_flush will queue IO into the worker threads, but
5481 * we have to make sure the IO is actually started and that
5482 * ordered extents get created before we return
5484 atomic_inc(&root->fs_info->async_submit_draining);
5485 while (atomic_read(&root->fs_info->nr_async_submits) ||
5486 atomic_read(&root->fs_info->async_delalloc_pages)) {
5487 wait_event(root->fs_info->async_submit_wait,
5488 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
5489 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
5491 atomic_dec(&root->fs_info->async_submit_draining);
5495 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
5496 const char *symname)
5498 struct btrfs_trans_handle *trans;
5499 struct btrfs_root *root = BTRFS_I(dir)->root;
5500 struct btrfs_path *path;
5501 struct btrfs_key key;
5502 struct inode *inode = NULL;
5510 struct btrfs_file_extent_item *ei;
5511 struct extent_buffer *leaf;
5512 unsigned long nr = 0;
5514 name_len = strlen(symname) + 1;
5515 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
5516 return -ENAMETOOLONG;
5519 * 2 items for inode item and ref
5520 * 2 items for dir items
5521 * 1 item for xattr if selinux is on
5523 err = btrfs_reserve_metadata_space(root, 5);
5527 trans = btrfs_start_transaction(root, 1);
5530 btrfs_set_trans_block_group(trans, dir);
5532 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
5538 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5540 dentry->d_parent->d_inode->i_ino, objectid,
5541 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
5543 err = PTR_ERR(inode);
5547 err = btrfs_init_inode_security(inode, dir);
5553 btrfs_set_trans_block_group(trans, inode);
5554 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
5558 inode->i_mapping->a_ops = &btrfs_aops;
5559 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5560 inode->i_fop = &btrfs_file_operations;
5561 inode->i_op = &btrfs_file_inode_operations;
5562 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
5564 btrfs_update_inode_block_group(trans, inode);
5565 btrfs_update_inode_block_group(trans, dir);
5569 path = btrfs_alloc_path();
5571 key.objectid = inode->i_ino;
5573 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
5574 datasize = btrfs_file_extent_calc_inline_size(name_len);
5575 err = btrfs_insert_empty_item(trans, root, path, &key,
5581 leaf = path->nodes[0];
5582 ei = btrfs_item_ptr(leaf, path->slots[0],
5583 struct btrfs_file_extent_item);
5584 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
5585 btrfs_set_file_extent_type(leaf, ei,
5586 BTRFS_FILE_EXTENT_INLINE);
5587 btrfs_set_file_extent_encryption(leaf, ei, 0);
5588 btrfs_set_file_extent_compression(leaf, ei, 0);
5589 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
5590 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
5592 ptr = btrfs_file_extent_inline_start(ei);
5593 write_extent_buffer(leaf, symname, ptr, name_len);
5594 btrfs_mark_buffer_dirty(leaf);
5595 btrfs_free_path(path);
5597 inode->i_op = &btrfs_symlink_inode_operations;
5598 inode->i_mapping->a_ops = &btrfs_symlink_aops;
5599 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5600 inode_set_bytes(inode, name_len);
5601 btrfs_i_size_write(inode, name_len - 1);
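/*
 * The link target is stored inline in the file extent item: name_len
 * counts the trailing NUL (strlen(symname) + 1) and is what gets written
 * into the leaf above, while i_size is set to name_len - 1 so it reflects
 * only the visible characters of the target.
 */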
5602 err = btrfs_update_inode(trans, root, inode);
5607 nr = trans->blocks_used;
5608 btrfs_end_transaction_throttle(trans, root);
5610 btrfs_unreserve_metadata_space(root, 5);
5612 inode_dec_link_count(inode);
5615 btrfs_btree_balance_dirty(root, nr);
5619 static int prealloc_file_range(struct btrfs_trans_handle *trans,
5620 struct inode *inode, u64 start, u64 end,
5621 u64 locked_end, u64 alloc_hint, int mode)
5623 struct btrfs_root *root = BTRFS_I(inode)->root;
5624 struct btrfs_key ins;
5626 u64 cur_offset = start;
5627 u64 num_bytes = end - start;
5630 while (num_bytes > 0) {
5631 alloc_size = min(num_bytes, root->fs_info->max_extent);
5633 ret = btrfs_reserve_metadata_space(root, 1);
5637 ret = btrfs_reserve_extent(trans, root, alloc_size,
5638 root->sectorsize, 0, alloc_hint,
5644 ret = insert_reserved_file_extent(trans, inode,
5645 cur_offset, ins.objectid,
5646 ins.offset, ins.offset,
5647 ins.offset, locked_end,
5649 BTRFS_FILE_EXTENT_PREALLOC);
5651 btrfs_drop_extent_cache(inode, cur_offset,
5652 cur_offset + ins.offset -1, 0);
5653 num_bytes -= ins.offset;
5654 cur_offset += ins.offset;
5655 alloc_hint = ins.objectid + ins.offset;
5656 btrfs_unreserve_metadata_space(root, 1);
5659 if (cur_offset > start) {
5660 inode->i_ctime = CURRENT_TIME;
5661 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
5662 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
5663 cur_offset > i_size_read(inode))
5664 btrfs_i_size_write(inode, cur_offset);
5665 ret = btrfs_update_inode(trans, root, inode);
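/*
 * In effect this implements the FALLOC_FL_KEEP_SIZE split: a plain
 * fallocate(fd, 0, offset, len) from userspace grows i_size to cover the
 * preallocated range, while fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, len)
 * reserves the extents past EOF but leaves the file size untouched.
 */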
5672 static long btrfs_fallocate(struct inode *inode, int mode,
5673 loff_t offset, loff_t len)
5681 u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
5682 struct extent_map *em;
5683 struct btrfs_trans_handle *trans;
5684 struct btrfs_root *root;
5687 alloc_start = offset & ~mask;
5688 alloc_end = (offset + len + mask) & ~mask;
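/*
 * Example (4K sectorsize, so mask = 4095): offset 5000, len 3000 gives
 * alloc_start = 5000 & ~4095 = 4096 and
 * alloc_end = (5000 + 3000 + 4095) & ~4095 = 8192, i.e. the requested
 * range is widened outward to whole sectors before preallocation.
 */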
5691 * wait for ordered IO before we have any locks. We'll loop again
5692 * below with the locks held.
5694 btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
5696 mutex_lock(&inode->i_mutex);
5697 if (alloc_start > inode->i_size) {
5698 ret = btrfs_cont_expand(inode, alloc_start);
5703 root = BTRFS_I(inode)->root;
5705 ret = btrfs_check_data_free_space(root, inode,
5706 alloc_end - alloc_start);
5710 locked_end = alloc_end - 1;
5712 struct btrfs_ordered_extent *ordered;
5714 trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
5720 /* the extent lock is ordered inside the running
5723 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5725 ordered = btrfs_lookup_first_ordered_extent(inode,
5728 ordered->file_offset + ordered->len > alloc_start &&
5729 ordered->file_offset < alloc_end) {
5730 btrfs_put_ordered_extent(ordered);
5731 unlock_extent(&BTRFS_I(inode)->io_tree,
5732 alloc_start, locked_end, GFP_NOFS);
5733 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
5736 * we can't wait on the range with the transaction
5737 * running or with the extent lock held
5739 btrfs_wait_ordered_range(inode, alloc_start,
5740 alloc_end - alloc_start);
5743 btrfs_put_ordered_extent(ordered);
5748 cur_offset = alloc_start;
5750 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
5751 alloc_end - cur_offset, 0);
5752 BUG_ON(IS_ERR(em) || !em);
5753 last_byte = min(extent_map_end(em), alloc_end);
5754 last_byte = (last_byte + mask) & ~mask;
5755 if (em->block_start == EXTENT_MAP_HOLE) {
5756 ret = prealloc_file_range(trans, inode, cur_offset,
5757 last_byte, locked_end + 1,
5760 free_extent_map(em);
5764 if (em->block_start <= EXTENT_MAP_LAST_BYTE)
5765 alloc_hint = em->block_start;
5766 free_extent_map(em);
5768 cur_offset = last_byte;
5769 if (cur_offset >= alloc_end) {
5774 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5777 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
5779 btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start);
5781 mutex_unlock(&inode->i_mutex);
5785 static int btrfs_set_page_dirty(struct page *page)
5787 return __set_page_dirty_nobuffers(page);
5790 static int btrfs_permission(struct inode *inode, int mask)
5792 if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
5794 return generic_permission(inode, mask, btrfs_check_acl);
5797 static struct inode_operations btrfs_dir_inode_operations = {
5798 .getattr = btrfs_getattr,
5799 .lookup = btrfs_lookup,
5800 .create = btrfs_create,
5801 .unlink = btrfs_unlink,
5803 .mkdir = btrfs_mkdir,
5804 .rmdir = btrfs_rmdir,
5805 .rename = btrfs_rename,
5806 .symlink = btrfs_symlink,
5807 .setattr = btrfs_setattr,
5808 .mknod = btrfs_mknod,
5809 .setxattr = btrfs_setxattr,
5810 .getxattr = btrfs_getxattr,
5811 .listxattr = btrfs_listxattr,
5812 .removexattr = btrfs_removexattr,
5813 .permission = btrfs_permission,
5815 static struct inode_operations btrfs_dir_ro_inode_operations = {
5816 .lookup = btrfs_lookup,
5817 .permission = btrfs_permission,
5820 static struct file_operations btrfs_dir_file_operations = {
5821 .llseek = generic_file_llseek,
5822 .read = generic_read_dir,
5823 .readdir = btrfs_real_readdir,
5824 .unlocked_ioctl = btrfs_ioctl,
5825 #ifdef CONFIG_COMPAT
5826 .compat_ioctl = btrfs_ioctl,
5828 .release = btrfs_release_file,
5829 .fsync = btrfs_sync_file,
5832 static struct extent_io_ops btrfs_extent_io_ops = {
5833 .fill_delalloc = run_delalloc_range,
5834 .submit_bio_hook = btrfs_submit_bio_hook,
5835 .merge_bio_hook = btrfs_merge_bio_hook,
5836 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
5837 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
5838 .writepage_start_hook = btrfs_writepage_start_hook,
5839 .readpage_io_failed_hook = btrfs_io_failed_hook,
5840 .set_bit_hook = btrfs_set_bit_hook,
5841 .clear_bit_hook = btrfs_clear_bit_hook,
5842 .merge_extent_hook = btrfs_merge_extent_hook,
5843 .split_extent_hook = btrfs_split_extent_hook,
5847 * btrfs doesn't support the bmap operation because swapfiles
5848 * use bmap to make a mapping of extents in the file. They assume
5849 * these extents won't change over the life of the file and they
5850 * use the bmap result to do IO directly to the drive.
5852 * the btrfs bmap call would return logical addresses that aren't
5853 * suitable for IO and they also will change frequently as COW
5854 * operations happen. So, swapfile + btrfs == corruption.
5856 * For now we're avoiding this by dropping bmap.
5858 static struct address_space_operations btrfs_aops = {
5859 .readpage = btrfs_readpage,
5860 .writepage = btrfs_writepage,
5861 .writepages = btrfs_writepages,
5862 .readpages = btrfs_readpages,
5863 .sync_page = block_sync_page,
5864 .direct_IO = btrfs_direct_IO,
5865 .invalidatepage = btrfs_invalidatepage,
5866 .releasepage = btrfs_releasepage,
5867 .set_page_dirty = btrfs_set_page_dirty,
5870 static struct address_space_operations btrfs_symlink_aops = {
5871 .readpage = btrfs_readpage,
5872 .writepage = btrfs_writepage,
5873 .invalidatepage = btrfs_invalidatepage,
5874 .releasepage = btrfs_releasepage,
5877 static struct inode_operations btrfs_file_inode_operations = {
5878 .truncate = btrfs_truncate,
5879 .getattr = btrfs_getattr,
5880 .setattr = btrfs_setattr,
5881 .setxattr = btrfs_setxattr,
5882 .getxattr = btrfs_getxattr,
5883 .listxattr = btrfs_listxattr,
5884 .removexattr = btrfs_removexattr,
5885 .permission = btrfs_permission,
5886 .fallocate = btrfs_fallocate,
5887 .fiemap = btrfs_fiemap,
5889 static struct inode_operations btrfs_special_inode_operations = {
5890 .getattr = btrfs_getattr,
5891 .setattr = btrfs_setattr,
5892 .permission = btrfs_permission,
5893 .setxattr = btrfs_setxattr,
5894 .getxattr = btrfs_getxattr,
5895 .listxattr = btrfs_listxattr,
5896 .removexattr = btrfs_removexattr,
5898 static struct inode_operations btrfs_symlink_inode_operations = {
5899 .readlink = generic_readlink,
5900 .follow_link = page_follow_link_light,
5901 .put_link = page_put_link,
5902 .permission = btrfs_permission,
5903 .setxattr = btrfs_setxattr,
5904 .getxattr = btrfs_getxattr,
5905 .listxattr = btrfs_listxattr,
5906 .removexattr = btrfs_removexattr,
5909 const struct dentry_operations btrfs_dentry_operations = {
5910 .d_delete = btrfs_dentry_delete,