/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
/* flags for direct write completions */
#define XFS_DIO_FLAG_UNWRITTEN	(1 << 0)
#define XFS_DIO_FLAG_APPEND	(1 << 1)
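
/*
 * Illustrative sketch, not functional code: these flag bits are packed
 * directly into bh->b_private as a uintptr_t rather than pointing at an
 * allocated object (see xfs_map_direct() and xfs_end_io_direct_write()
 * below), so a completion-side check looks like:
 *
 *	uintptr_t flags = (uintptr_t)private;
 *
 *	if (flags & XFS_DIO_FLAG_UNWRITTEN)
 *		error = xfs_iomap_write_unwritten(ip, offset, size);
 */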
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}
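
/*
 * For illustration: with io_offset == 4096 and io_size == 8192 the ioend
 * covers bytes [4096, 12288), so it is an append whenever the on-disk
 * di_size is less than 12288. "Fast and loose" because di_size is read
 * without the ilock here; the locked re-check happens in
 * xfs_setfilesize().
 */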
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (ioend->io_error) {
		xfs_trans_cancel(tp);
		return ioend->io_error;
	}

	return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right away.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == XFS_IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans)
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}
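
/*
 * Illustrative note on the reference counting, not functional code:
 * io_remaining starts at 1 (see xfs_alloc_ioend() below), each bio takes
 * an extra hold in xfs_submit_ioend_bio(), and the submission path drops
 * the initial hold with a final xfs_finish_ioend() call:
 *
 *	atomic_inc(&ioend->io_remaining);	// per-bio hold
 *	submit_bio(...);			// bio completion drops it
 *	...
 *	xfs_finish_ioend(ioend);		// drop the initial hold
 *
 * so the completion work can only run once all bios have completed.
 */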
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	/*
	 * Set an error if the mount has shut down and proceed with end I/O
	 * processing so it can perform whatever cleanups are necessary.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		ioend->io_error = -EIO;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 * Detecting and handling completion IO errors is done individually
	 * for each case as different cleanup operations need to be performed
	 * on error.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		if (ioend->io_error)
			goto done;
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize_ioend(ioend);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	if (error)
		ioend->io_error = error;
	xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O, i.e. from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_append_trans = NULL;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}

STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -EAGAIN;
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}
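
/*
 * Worked example, for illustration only: with 4k blocks (i_blkbits == 12)
 * a byte offset of 8192 becomes file block 2, so an imap with
 * br_startoff == 0 and br_blockcount == 4 covers it (0 <= 2 < 4), while
 * one with br_startoff == 4 does not.
 */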
/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	if (!ioend->io_error)
		ioend->io_error = bio->bi_error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}

STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

	ASSERT(bio->bi_private == NULL);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly. That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);

	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
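
/*
 * Note, for illustration: bio_add_page() returns the number of bytes it
 * actually added, so a short return from xfs_bio_add_buffer() means the
 * bio is full. Callers treat that as "submit this bio and retry with a
 * fresh one", as xfs_submit_ioend() does below:
 *
 *	if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
 *		xfs_submit_ioend_bio(wbc, ioend, bio);
 *		goto retry;
 *	}
 */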
/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, then we can end up with a page that only has buffers
 * marked async write, and I/O completion can occur before we mark the other
 * buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the ioend chain rather
 * than submit it to IO. This typically only happens on a filesystem shutdown.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	int			fail)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		/*
		 * If we are failing the IO now, just mark the ioend with an
		 * error and finish it. This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		if (fail) {
			ioend->io_error = fail;
			xfs_finish_ioend(ioend);
			continue;
		}

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too. Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			/*
			 * The unwritten flag is cleared when added to the
			 * ioend. We're not submitting for I/O so mark the
			 * buffer unwritten again for next time around.
			 */
			if (ioend->io_type == XFS_IO_UNWRITTEN)
				set_buffer_unwritten(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return true if we've finished the given ioend.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}
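
/*
 * Resulting structure, for illustration only: ioends are chained through
 * io_list, and each ioend strings its buffers together through b_private
 * with io_buffer_tail->b_private left NULL:
 *
 *	ioend A --io_list--> ioend B --io_list--> NULL
 *	   |                    |
 *	   bh -> bh -> NULL     bh -> bh -> bh -> NULL
 *
 * xfs_submit_ioend() walks both levels of this chain.
 */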
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}
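
/*
 * Worked example, for illustration only: with 4k filesystem blocks
 * (i_blkbits == 12, BBSHIFT == 9), an extent whose xfs_fsb_to_db() value
 * is 80 basic (512 byte) blocks starts at device block 80 >> 3 == 10 in
 * b_size units; a buffer 8192 bytes into that extent adds
 * 8192 >> 12 == 2, giving b_blocknr == 12.
 */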
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only, for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_check_page_type(page, (*ioendp)->io_type, false))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));
	/*
	 * If the current map does not span the entire page we are about to try
	 * to write, then give up. The only way we can write a page that spans
	 * multiple mappings in a single writeback iteration is via the
	 * xfs_vm_writepage() function. Data integrity writeback requires the
	 * entire page to be written in a single attempt, otherwise the part of
	 * the page we don't write here doesn't get written as part of the data
	 * integrity sync.
	 *
	 * For normal writeback, we also don't attempt to write partial pages
	 * here as it simply means that write_cache_pages() will see it under
	 * writeback and ignore the page until some point in the future, at
	 * which time this will be the only page in the file that needs
	 * writeback. Hence for more optimal IO patterns, we should always
	 * avoid partial page writeback due to multiple mappings on a page here.
	 */
	if (!xfs_imap_valid(inode, imap, end_offset))
		goto fail_unlock_page;
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;
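
	/*
	 * Worked example, for illustration only: on a 4k page with 512 byte
	 * buffers (len == 512), if EOF lands 1536 bytes into this page then
	 * end_offset & (PAGE_CACHE_SIZE - 1) == 1536, which rounds up to
	 * 1536 and gives page_dirty == 3 buffers before EOF. On any other
	 * page the masked value is 0, p_offset becomes PAGE_CACHE_SIZE and
	 * page_dirty counts all 8 buffers.
	 */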
	/*
	 * The moment we find a buffer that doesn't match our current type
	 * specification or can't be written, abort the loop and start
	 * writeback. As per the above xfs_imap_valid() check, only
	 * xfs_vm_writepage() can handle partial page writeback fully - we are
	 * limited here to the buffers that are contiguous with the current
	 * ioend, and hence a buffer we can't write breaks that contiguity and
	 * we have to defer the rest of the IO to xfs_vm_writepage().
	 */
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			break;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = XFS_IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = XFS_IO_DELALLOC;
			else
				type = XFS_IO_OVERWRITE;

			/*
			 * imap should always be valid because of the above
			 * partial page end_offset check on the imap.
			 */
			ASSERT(xfs_imap_valid(inode, imap, offset));

			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
			break;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by imap and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
						imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
	}
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
	return;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim. We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
		goto redirty;
	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;

	/*
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    | Straddles |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0. Hence this page will be redirtied
		 * and be written out repeatedly which would result in an
		 * infinite loop, the user program that performs this operation
		 * will hang. Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size. It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size. For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}
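
	/*
	 * Worked example, for illustration only: with i_size == 10000 and 4k
	 * pages, end_index == 2. A page at index 3 is skipped entirely, while
	 * the page at index 2 straddles EOF with offset_into_page ==
	 * 10000 & 4095 == 1808, so bytes 1808..4095 of it are zeroed before
	 * writeback and end_offset becomes 10000.
	 */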
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = XFS_IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state. The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != XFS_IO_UNWRITTEN) {
				type = XFS_IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != XFS_IO_DELALLOC) {
				type = XFS_IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != XFS_IO_OVERWRITE) {
				type = XFS_IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new ioend.
			 */
			imap_valid = 0;
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);
	/* if there is no IO to be submitted for this page, we are done */
	if (!ioend)
		return 0;

	ASSERT(iohead);

	/*
	 * Any errors from this point onwards need to be reported through the IO
	 * completion path as we have marked the initial page as under writeback
	 * and unlocked it.
	 */
	if (imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	/*
	 * Reserve log space if we might write beyond the on-disk inode size.
	 */
	err = 0;
	if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
		err = xfs_setfilesize_trans_alloc(ioend);

	xfs_submit_ioend(wbc, iohead, err);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
		return 0;
	if (WARN_ON_ONCE(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

/*
 * When we map a DIO buffer, we may need to pass flags to
 * xfs_end_io_direct_write to tell it what kind of write IO we are doing.
 *
 * Note that for DIO, an IO to the highest supported file block offset (i.e.
 * 2^63 - 1FSB bytes) will result in the offset + count overflowing a signed 64
 * bit variable. Hence if we see this overflow, we have to assume that the IO is
 * extending the file size. We won't know for sure until IO completion is run
 * and the actual max write offset is communicated to the IO completion
 * routine.
 */
static void
xfs_map_direct(
	struct inode		*inode,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	uintptr_t		*flags = (uintptr_t *)&bh_result->b_private;
	xfs_off_t		size = bh_result->b_size;

	trace_xfs_get_blocks_map_direct(XFS_I(inode), offset, size,
		ISUNWRITTEN(imap) ? XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, imap);

	if (ISUNWRITTEN(imap)) {
		*flags |= XFS_DIO_FLAG_UNWRITTEN;
		set_buffer_defer_completion(bh_result);
	} else if (offset + size > i_size_read(inode) || offset + size < 0) {
		*flags |= XFS_DIO_FLAG_APPEND;
		set_buffer_defer_completion(bh_result);
	}
}

/*
 * If this is O_DIRECT or the mpage code calling, tell them how large the
 * mapping is so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  1 << inode->i_blkbits);
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}
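
/*
 * Worked example, for illustration only: with 4k blocks and
 * i_size == 10000, a mapping at offset 0 covering 8 blocks (32768 bytes)
 * straddles EOF and is trimmed to roundup_64(10000, 4096) == 12288 bytes.
 * The blocks beyond that boundary are handed out by a later call as a
 * separate mapping that can safely be marked new.
 */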
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	bool			direct,
	bool			dax_fault)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first. For buffered
	 * writes we already have the exclusive iolock anyway, so avoiding
	 * a lock roundtrip here by taking the ilock exclusive from the
	 * beginning is a useful micro optimization.
	 */
	if (create && !direct) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
	} else {
		lockmode = xfs_ilock_data_map_shared(ip);
	}

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	/* for DAX, we convert unwritten extents directly */
	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK) ||
	     (IS_DAX(inode) && ISUNWRITTEN(&imap)))) {
		if (direct || xfs_get_extsz_hint(ip)) {
			/*
			 * xfs_iomap_write_direct() expects the shared lock. It
			 * is unlocked on return.
			 */
			if (lockmode == XFS_ILOCK_EXCL)
				xfs_ilock_demote(ip, lockmode);

			error = xfs_iomap_write_direct(ip, offset, size,
						       &imap, nimaps);
			if (error)
				return error;
			new = 1;

		} else {
			/*
			 * Delalloc reservations do not require a transaction,
			 * we can go on without dropping the lock here. If we
			 * are allocating a new delalloc block, make sure that
			 * we set the new flag so we know the buffer is newly
			 * allocated if the write fails.
			 */
			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
				new = 1;
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
			if (error)
				goto out_unlock;

			xfs_iunlock(ip, lockmode);
		}
		trace_xfs_get_blocks_alloc(ip, offset, size,
				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
						   : XFS_IO_DELALLOC, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size,
				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
						   : XFS_IO_OVERWRITE, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	if (IS_DAX(inode) && create) {
		ASSERT(!ISUNWRITTEN(&imap));
		/* zeroing is not needed at a higher layer */
		new = 0;
	}

	/* trim mapping down to size requested */
	if (direct || size > (1 << inode->i_blkbits))
		xfs_map_trim_size(inode, iblock, bh_result,
				  &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK &&
	    (create || !ISUNWRITTEN(&imap))) {
		xfs_map_buffer(inode, bh_result, &imap, offset);
		if (ISUNWRITTEN(&imap))
			set_buffer_unwritten(bh_result);
		/* direct IO needs special help */
		if (create && direct) {
			if (dax_fault)
				ASSERT(!ISUNWRITTEN(&imap));
			else
				xfs_map_direct(inode, bh_result, &imap, offset);
		}
	}
	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, false, false);
}

int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, true, false);
}

int
xfs_get_blocks_dax_fault(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, true, true);
}

/*
 * Complete a direct I/O write request.
 *
 * xfs_map_direct passes us some flags in the private data to tell us what to
 * do. If no flags are set, then the write IO is an overwrite wholly within
 * the existing allocated file size and so there is nothing for us to do.
 *
 * Note that in this case the completion can be called in interrupt context,
 * whereas if we have flags set we will always be called in task context
 * (i.e. from a workqueue).
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	uintptr_t		flags = (uintptr_t)private;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(mp))
		return;
	if (size <= 0)
		return;

	/*
	 * The flags tell us whether we are doing unwritten extent conversions
	 * or an append transaction that updates the on-disk file size. These
	 * cases are the only cases where we should *potentially* be needing
	 * to update the VFS inode size.
	 */
	if (flags == 0) {
		ASSERT(offset + size <= i_size_read(inode));
		return;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode))
		i_size_write(inode, offset + size);
	spin_unlock(&ip->i_flags_lock);

	if (flags & XFS_DIO_FLAG_UNWRITTEN) {
		trace_xfs_end_io_direct_write_unwritten(ip, offset, size);

		error = xfs_iomap_write_unwritten(ip, offset, size);
	} else if (flags & XFS_DIO_FLAG_APPEND) {
		struct xfs_trans *tp;

		trace_xfs_end_io_direct_write_append(ip, offset, size);

		tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
		if (error) {
			xfs_trans_cancel(tp);
			return;
		}
		error = xfs_setfilesize(ip, tp, offset, size);
	}
}

STATIC ssize_t
xfs_vm_direct_IO(
	struct kiocb		*iocb,
	struct iov_iter		*iter,
	loff_t			offset)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	dio_iodone_t		*endio = NULL;
	int			flags = 0;
	struct block_device	*bdev;

	if (iov_iter_rw(iter) == WRITE) {
		endio = xfs_end_io_direct_write;
		flags = DIO_ASYNC_EXTEND;
	}

	if (IS_DAX(inode)) {
		return dax_do_io(iocb, inode, iter, offset,
				 xfs_get_blocks_direct, endio, 0);
	}

	bdev = xfs_find_bdev_for_inode(inode);
	return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
				    xfs_get_blocks_direct, endio, NULL, flags);
}

/*
 * Punch out the delalloc blocks we have already allocated.
 *
 * Don't bother with xfs_setattr given that nothing can have made it to disk yet
 * as the page is still locked at this point.
 */
STATIC void
xfs_vm_kill_delalloc_range(
	struct inode		*inode,
	loff_t			start,
	loff_t			end)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
	if (end_fsb <= start_fsb)
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						end_fsb - start_fsb);
	if (error) {
		/* something screwed, just bail */
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			xfs_alert(ip->i_mount,
		"xfs_vm_write_failed: unable to clean up ino %lld",
					ip->i_ino);
		}
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

STATIC void
xfs_vm_write_failed(
	struct inode		*inode,
	struct page		*page,
	loff_t			pos,
	unsigned		len)
{
	loff_t			block_offset;
	loff_t			block_start;
	loff_t			block_end;
	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
	loff_t			to = from + len;
	struct buffer_head	*bh, *head;
	/*
	 * The request pos offset might be 32 or 64 bit, this is all fine
	 * on 64-bit platform. However, for 64-bit pos request on 32-bit
	 * platform, the high 32-bit will be masked off if we evaluate the
	 * block_offset via (pos & PAGE_MASK) because the PAGE_MASK is
	 * 0xfffff000 as an unsigned long, hence the result is incorrect
	 * which could cause the following ASSERT to fail in most cases.
	 * In order to avoid this, we can evaluate the block_offset of the
	 * start of the page by using shifts rather than masks, which avoids
	 * the mismatch problem.
	 */
	block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;

	ASSERT(block_offset + from == pos);
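
	/*
	 * Worked example, for illustration only: on a 32-bit platform with
	 * pos == 0x100001000, (pos & PAGE_MASK) yields 0x1000 because
	 * PAGE_MASK is a 32-bit unsigned long, whereas
	 * (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT is evaluated in the
	 * 64-bit type of pos and correctly yields 0x100001000.
	 */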
	head = page_buffers(page);
	block_start = 0;
	for (bh = head; bh != head || !block_start;
	     bh = bh->b_this_page, block_start = block_end,
	     block_offset += bh->b_size) {
		block_end = block_start + bh->b_size;

		/* skip buffers before the write */
		if (block_end <= from)
			continue;

		/* if the buffer is after the write, we're done */
		if (block_start >= to)
			break;

		/*
		 * Process delalloc and unwritten buffers beyond EOF. We can
		 * encounter unwritten buffers in the event that a file has
		 * post-EOF unwritten extents and an extending write happens to
		 * fail (e.g., an unaligned write that also involves a delalloc
		 * to the same page).
		 */
		if (!buffer_delay(bh) && !buffer_unwritten(bh))
			continue;

		if (!buffer_new(bh) && block_offset < i_size_read(inode))
			continue;

		if (buffer_delay(bh))
			xfs_vm_kill_delalloc_range(inode, block_offset,
						   block_offset + bh->b_size);
		/*
		 * This buffer does not contain data anymore. Make sure anyone
		 * who finds it knows that for certain.
		 */
		clear_buffer_delay(bh);
		clear_buffer_uptodate(bh);
		clear_buffer_mapped(bh);
		clear_buffer_new(bh);
		clear_buffer_dirty(bh);
		clear_buffer_unwritten(bh);
	}
}

/*
 * This used to call block_write_begin(), but it unlocks and releases the page
 * on error, and we need that page to be able to punch stale delalloc blocks out
 * on failure. Hence we copy-n-waste it here and call xfs_vm_write_failed() at
 * the appropriate point.
 */
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
	struct page		*page;
	int			status;

	ASSERT(len <= PAGE_CACHE_SIZE);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, xfs_get_blocks);
	if (unlikely(status)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);

		xfs_vm_write_failed(inode, page, pos, len);
		unlock_page(page);

		/*
		 * If the write is beyond EOF, we only want to kill blocks
		 * allocated in this write, not blocks that were previously
		 * written successfully.
		 */
		if (pos + len > isize) {
			ssize_t start = max_t(ssize_t, pos, isize);

			truncate_pagecache_range(inode, start, pos + len);
		}

		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}

/*
 * On failure, we only need to kill delalloc blocks beyond EOF in the range of
 * this specific write because they will never be written. Previous writes
 * beyond EOF where block allocation succeeded do not need to be trashed, so
 * only new blocks from this write should be trashed. For blocks within
 * EOF, generic_write_end() zeros them so they are safe to leave alone and be
 * written with all the other valid data.
 */
STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ASSERT(len <= PAGE_CACHE_SIZE);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);
		loff_t		to = pos + len;

		if (to > isize) {
			/* only kill blocks in this write beyond EOF */
			if (pos > isize)
				isize = pos;
			xfs_vm_kill_delalloc_range(inode, isize, to);
			truncate_pagecache_range(inode, isize, to);
		}
	}
	return ret;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	filemap_write_and_wait(mapping);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;
	struct mem_cgroup	*memcg;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += 1 << inode->i_blkbits;
		} while (bh != head);
	}
	/*
	 * Use mem_cgroup_begin_page_stat() to keep PageDirty synchronized with
	 * per-memcg dirty page counters.
	 */
	memcg = mem_cgroup_begin_page_stat(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping, memcg);
			radix_tree_tag_set(&mapping->page_tree,
					   page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
	mem_cgroup_end_page_stat(memcg);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};