/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);
	int total_copied = 0;

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
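
/*
 * Illustrative walk-through (editor's addition, not in the original
 * source): with 4K pages, pos = 6144 and write_bytes = 10240, the loop
 * above starts at offset = 6144 & 4095 = 2048 in prepared_pages[0],
 * copies min(4096 - 2048, 10240) = 2048 bytes there, then two full
 * 4096-byte chunks from pages 1 and 2.  A short atomic copy (the user
 * buffer faulted) breaks out early and returns the partial total so
 * btrfs_file_aio_write can fault the buffer in and retry.
 */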

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					NULL);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		/* we've only changed i_size in ram, and we haven't updated
		 * the disk i_size.  There is no need to log the inode
		 * at this time.
		 */
	}
	return 0;
}
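
/*
 * Worked example of the alignment math above (editor's addition): with
 * sectorsize = 4096, pos = 5000 and write_bytes = 4000,
 *	start_pos = 5000 & ~4095 = 4096
 *	num_bytes = (4000 + 5000 - 4096 + 4095) & ~4095 = 8192
 * so delalloc is set on the two full sectors [4096, 12288) containing
 * the byte range [5000, 9000), and end_of_last_block = 12287.
 */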

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
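
/*
 * Example of the split logic above (editor's addition): dropping
 * [16K, 32K) from a cached uncompressed mapping of [0, 64K) at disk
 * block_start B leaves two pieces:
 *	left:  start 0,   len 16K, block_start B		(split)
 *	right: start 32K, len 32K, block_start B + 32K		(split2)
 * For compressed extents both pieces keep the whole extent's block_start
 * and block_len, since the on-disk bytes only decompress as a unit.
 */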

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == inode->i_ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > inode->i_ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		if (recow) {
			btrfs_release_path(root, path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(root, path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				BUG_ON(ret);
				*hint_byte = disk_bytenr;
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, end - key.offset);
				*hint_byte = disk_bytenr;
			}
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, extent_end - start);
				*hint_byte = disk_bytenr;
			}
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset);
				BUG_ON(ret);
				inode_sub_bytes(inode,
						extent_end - key.offset);
				*hint_byte = disk_bytenr;
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			BUG_ON(ret);
			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(root, path);
			continue;
		}

		BUG_ON(1);
	}

	if (del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}

	btrfs_free_path(path);
	return ret;
}
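
/*
 * Example (editor's addition): dropping [8K, 16K) from a regular extent
 * item mapping [4K, 20K) takes the first case above: the item is
 * duplicated at offset 8K, the front copy trimmed to 4K and the back
 * copy rebased (extent_offset += 4K, num_bytes = 12K) with an extra ref
 * taken on the shared disk extent.  The same pass then falls through to
 * the second case and trims the back copy against end = 16K, leaving
 * [4K, 8K) and [16K, 20K) in the tree.
 */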

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	recow = 0;
	split = start;
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(root, path);
			goto again;
		}
		BUG_ON(ret < 0);

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   inode->i_ino, orig_offset);
		BUG_ON(ret);

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}
out:
	btrfs_free_path(path);
	return 0;
}
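
/*
 * Example (editor's addition): writing [4K, 8K) into a preallocated
 * extent covering [0, 12K), with no mergeable neighbours, runs the split
 * loop twice: once at split = 4K (the front piece [0, 4K) stays
 * PREALLOC) and once at split = 8K (the tail [8K, 12K) stays PREALLOC),
 * taking an extra disk-extent ref each time.  The middle item [4K, 8K)
 * is then flipped to BTRFS_FILE_EXTENT_REG.
 */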

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			/* drop everything we grabbed so far */
			while (--i >= 0) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			return -ENOMEM;
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state,
				 GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				  EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				  GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}
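
/*
 * Example of the range math above (editor's addition): with 4K pages,
 * pos = 5000 and num_pages = 3 give index = 5000 >> 12 = 1,
 * start_pos = 4096 and last_pos = (1 + 3) << 12 = 16384, so the extent
 * lock and the ordered-extent check cover bytes [4096, 16383], exactly
 * the three pages being prepared.
 */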

static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page *pinned[2];
	struct page **pages = NULL;
	struct iov_iter i;
	loff_t *ppos = &iocb->ki_pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count;
	size_t ocount;
	int ret = 0;
	int nrptrs;
	unsigned long first_index;
	unsigned long last_index;
	int will_write;
	int buffered = 0;
	int copied = 0;
	int dirty_pages = 0;

	will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	pinned[0] = NULL;
	pinned[1] = NULL;

	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		goto out;
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);
	BTRFS_I(inode)->sequence++;

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = generic_file_direct_write(iocb, iov, &nr_segs,
							pos, ppos, count,
							ocount);
		/*
		 * the generic O_DIRECT will update in-memory i_size after the
		 * DIOs are done.  But our endio handlers that update the on
		 * disk i_size never update past the in memory i_size.  So we
		 * need one more update here to catch any additions to the
		 * file.
		 */
		if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
			btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
			mark_inode_dirty(inode);
		}

		if (num_written < 0) {
			ret = num_written;
			num_written = 0;
			goto out;
		} else if (num_written == count) {
			/* pick up pos changes done by the generic code */
			pos = *ppos;
			goto out;
		}
		/*
		 * We are going to do buffered for the rest of the range, so we
		 * need to make sure to invalidate the buffered pages when we're
		 * done.
		 */
		buffered = 1;
		pos += num_written;
	}

	iov_iter_init(&i, iov, nr_segs, count, num_written);
	nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	/* generic_write_checks can change our pos */
	start_pos = pos;

	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (iov_iter_count(&i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(&i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(&i, write_bytes))) {
			ret = -EFAULT;
			goto out;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			goto out;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, &i);
		dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >>
				PAGE_CACHE_SHIFT;

		if (num_pages > dirty_pages) {
			if (copied > 0)
				atomic_inc(
					&BTRFS_I(inode)->outstanding_extents);
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			dirty_and_release_pages(NULL, root, file, pages,
						dirty_pages, pos, copied);
		}

		btrfs_drop_pages(pages, num_pages);

		if (copied > 0) {
			if (will_write) {
				filemap_fdatawrite_range(inode->i_mapping, pos,
							 pos + copied - 1);
			} else {
				balance_dirty_pages_ratelimited_nr(
							inode->i_mapping,
							dirty_pages);
				if (dirty_pages <
				    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
					btrfs_btree_balance_dirty(root, 1);
				btrfs_throttle(root);
			}
		}

		pos += copied;
		num_written += copied;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (ret)
		err = ret;

	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 0);
			if (IS_ERR(trans)) {
				num_written = PTR_ERR(trans);
				goto done;
			}
			mutex_lock(&inode->i_mutex);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			mutex_unlock(&inode->i_mutex);
			if (ret == 0) {
				ret = btrfs_sync_log(trans, root);
				if (ret == 0)
					btrfs_end_transaction(trans, root);
				else
					btrfs_commit_transaction(trans, root);
			} else if (ret != BTRFS_NO_LOG_SYNC) {
				btrfs_commit_transaction(trans, root);
			} else {
				btrfs_end_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT && buffered) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
done:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
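
/*
 * Sizing example for nrptrs above (editor's addition): with 4K pages and
 * 8-byte pointers, PAGE_CACHE_SIZE / sizeof(struct page *) = 512, so a
 * 1MB write needs 256 page pointers and finishes the copy loop in one
 * pass, while a 16MB write is capped at 512 pointers (2MB per pass) and
 * loops eight times.
 */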

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/* we wait first, since the writeback may change the inode */
	root->log_batch++;
	/* the VFS called filemap_fdatawrite for us */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	/*
	 * ok we haven't committed the transaction yet, let's do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}
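
/*
 * Fast-path summary (editor's addition): when last_trans is at or behind
 * last_trans_committed, the inode already sits in a committed
 * transaction, so fsync returns without starting a new one; otherwise
 * the inode is logged and btrfs_sync_log (or a full commit, when the
 * root cannot use the tree log) makes the change durable.
 */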

static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}

const struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};