2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 #include "xfs_shared.h"
20 #include "xfs_format.h"
21 #include "xfs_log_format.h"
22 #include "xfs_trans_resv.h"
23 #include "xfs_mount.h"
24 #include "xfs_inode.h"
25 #include "xfs_trans.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_alloc.h"
28 #include "xfs_error.h"
29 #include "xfs_iomap.h"
30 #include "xfs_trace.h"
32 #include "xfs_bmap_util.h"
33 #include "xfs_bmap_btree.h"
34 #include <linux/gfp.h>
35 #include <linux/mpage.h>
36 #include <linux/pagevec.h>
37 #include <linux/writeback.h>
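/*
 * Body of xfs_count_page_state() (called from xfs_vm_releasepage() below):
 * walk the page's buffer_heads and count how many are delalloc and how
 * many are unwritten.
 */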
45 struct buffer_head *bh, *head;
47 *delalloc = *unwritten = 0;
49 bh = head = page_buffers(page);
51 if (buffer_unwritten(bh))
53 else if (buffer_delay(bh))
55 } while ((bh = bh->b_this_page) != head);
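/*
 * Return the block device backing this inode's data: the realtime device
 * for realtime inodes, the main data device otherwise.
 */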
58 STATIC struct block_device *
59 xfs_find_bdev_for_inode(
62 struct xfs_inode *ip = XFS_I(inode);
63 struct xfs_mount *mp = ip->i_mount;
65 if (XFS_IS_REALTIME_INODE(ip))
66 return mp->m_rtdev_targp->bt_bdev;
68 return mp->m_ddev_targp->bt_bdev;
72 * We're now finished for good with this ioend structure.
73 * Update the page state via the associated buffer_heads,
74 * release holds on the inode and bio, and finally free
75 * up memory. Do not use the ioend after this.
81 struct buffer_head *bh, *next;
83 for (bh = ioend->io_buffer_head; bh; bh = next) {
85 bh->b_end_io(bh, !ioend->io_error);
88 mempool_free(ioend, xfs_ioend_pool);
92 * Fast and loose check if this write could update the on-disk inode size.
94 static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
96 return ioend->io_offset + ioend->io_size >
97 XFS_I(ioend->io_inode)->i_d.di_size;
101 xfs_setfilesize_trans_alloc(
102 struct xfs_ioend *ioend)
104 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
105 struct xfs_trans *tp;
108 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
110 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
112 xfs_trans_cancel(tp);
116 ioend->io_append_trans = tp;
119 * We may pass freeze protection with a transaction. So tell lockdep
122 __sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
124 * We hand off the transaction to the completion thread now, so
125 * clear the flag here.
127 current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
132 * Update on-disk file size now that data has been written to disk.
136 struct xfs_inode *ip,
137 struct xfs_trans *tp,
143 xfs_ilock(ip, XFS_ILOCK_EXCL);
144 isize = xfs_new_eof(ip, offset + size);
146 xfs_iunlock(ip, XFS_ILOCK_EXCL);
147 xfs_trans_cancel(tp);
151 trace_xfs_setfilesize(ip, offset, size);
153 ip->i_d.di_size = isize;
154 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
155 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
157 return xfs_trans_commit(tp);
161 xfs_setfilesize_ioend(
162 struct xfs_ioend *ioend)
164 struct xfs_inode *ip = XFS_I(ioend->io_inode);
165 struct xfs_trans *tp = ioend->io_append_trans;
168 * The transaction may have been allocated in the I/O submission thread,
169 * thus we need to mark ourselves as being in a transaction manually.
170 * Similarly for freeze protection.
172 current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
173 __sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
175 return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
179 * Schedule IO completion handling on the final put of an ioend.
181 * If there is no work to do we might as well call it a day and free the
186 struct xfs_ioend *ioend)
188 if (atomic_dec_and_test(&ioend->io_remaining)) {
189 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
191 if (ioend->io_type == XFS_IO_UNWRITTEN)
192 queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
193 else if (ioend->io_append_trans)
194 queue_work(mp->m_data_workqueue, &ioend->io_work);
196 xfs_destroy_ioend(ioend);
201 * IO write completion.
205 struct work_struct *work)
207 xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work);
208 struct xfs_inode *ip = XFS_I(ioend->io_inode);
211 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
212 ioend->io_error = -EIO;
219 * For unwritten extents we need to issue transactions to convert a
220 * range to normal written extents after the data I/O has finished.
222 if (ioend->io_type == XFS_IO_UNWRITTEN) {
223 error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
225 } else if (ioend->io_append_trans) {
226 error = xfs_setfilesize_ioend(ioend);
228 ASSERT(!xfs_ioend_is_append(ioend));
233 ioend->io_error = error;
234 xfs_destroy_ioend(ioend);
238 * Allocate and initialise an IO completion structure.
239 * We need to track unwritten extent write completion here initially.
240 * We'll need to extend this for updating the on-disk inode size later
250 ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
253 * Set the count to 1 initially. This prevents the I/O completion
254 * callback from running the completion routine too early, i.e.
255 * before we have submitted all the I/O.
257 atomic_set(&ioend->io_remaining, 1);
259 ioend->io_list = NULL;
260 ioend->io_type = type;
261 ioend->io_inode = inode;
262 ioend->io_buffer_head = NULL;
263 ioend->io_buffer_tail = NULL;
264 ioend->io_offset = 0;
266 ioend->io_append_trans = NULL;
268 INIT_WORK(&ioend->io_work, xfs_end_io);
276 struct xfs_bmbt_irec *imap,
280 struct xfs_inode *ip = XFS_I(inode);
281 struct xfs_mount *mp = ip->i_mount;
282 ssize_t count = 1 << inode->i_blkbits;
283 xfs_fileoff_t offset_fsb, end_fsb;
285 int bmapi_flags = XFS_BMAPI_ENTIRE;
288 if (XFS_FORCED_SHUTDOWN(mp))
291 if (type == XFS_IO_UNWRITTEN)
292 bmapi_flags |= XFS_BMAPI_IGSTATE;
294 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
297 xfs_ilock(ip, XFS_ILOCK_SHARED);
300 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
301 (ip->i_df.if_flags & XFS_IFEXTENTS));
302 ASSERT(offset <= mp->m_super->s_maxbytes);
304 if (offset + count > mp->m_super->s_maxbytes)
305 count = mp->m_super->s_maxbytes - offset;
306 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
307 offset_fsb = XFS_B_TO_FSBT(mp, offset);
308 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
309 imap, &nimaps, bmapi_flags);
310 xfs_iunlock(ip, XFS_ILOCK_SHARED);
315 if (type == XFS_IO_DELALLOC &&
316 (!nimaps || isnullstartblock(imap->br_startblock))) {
317 error = xfs_iomap_write_allocate(ip, offset, imap);
319 trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
324 if (type == XFS_IO_UNWRITTEN) {
326 ASSERT(imap->br_startblock != HOLESTARTBLOCK);
327 ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
331 trace_xfs_map_blocks_found(ip, offset, count, type, imap);
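/*
 * Body of xfs_imap_valid(): an offset is covered by the cached mapping if,
 * once converted to a file block, it falls within
 * [br_startoff, br_startoff + br_blockcount).
 */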
338 struct xfs_bmbt_irec *imap,
341 offset >>= inode->i_blkbits;
343 return offset >= imap->br_startoff &&
344 offset < imap->br_startoff + imap->br_blockcount;
348 * BIO completion handler for buffered IO.
354 xfs_ioend_t *ioend = bio->bi_private;
356 ioend->io_error = bio->bi_error;
358 /* Toss bio and pass work off to an xfsdatad thread */
359 bio->bi_private = NULL;
360 bio->bi_end_io = NULL;
363 xfs_finish_ioend(ioend);
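/*
 * Submit a bio for part of an ioend. Each bio in flight holds a reference
 * on the ioend (io_remaining) that is dropped in xfs_end_bio(), and
 * WB_SYNC_ALL writeback is issued as WRITE_SYNC.
 */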
367 xfs_submit_ioend_bio(
368 struct writeback_control *wbc,
372 atomic_inc(&ioend->io_remaining);
373 bio->bi_private = ioend;
374 bio->bi_end_io = xfs_end_bio;
375 submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
380 struct buffer_head *bh)
382 struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
384 ASSERT(bio->bi_private == NULL);
385 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
386 bio->bi_bdev = bh->b_bdev;
391 xfs_start_buffer_writeback(
392 struct buffer_head *bh)
394 ASSERT(buffer_mapped(bh));
395 ASSERT(buffer_locked(bh));
396 ASSERT(!buffer_delay(bh));
397 ASSERT(!buffer_unwritten(bh));
399 mark_buffer_async_write(bh);
400 set_buffer_uptodate(bh);
401 clear_buffer_dirty(bh);
405 xfs_start_page_writeback(
410 ASSERT(PageLocked(page));
411 ASSERT(!PageWriteback(page));
414 * If the page was not fully cleaned, we need to ensure that the higher
415 * layers come back to it correctly. That means we need to keep the page
416 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
417 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
418 * write this page in this writeback sweep will be made.
421 clear_page_dirty_for_io(page);
422 set_page_writeback(page);
424 set_page_writeback_keepwrite(page);
428 /* If no buffers on the page are to be written, finish it here */
430 end_page_writeback(page);
433 static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
435 return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
439 * Submit all of the bios for all of the ioends we have saved up, covering the
440 * initial writepage page and also any probed pages.
442 * Because we may have multiple ioends spanning a page, we need to start
443 * writeback on all the buffers before we submit them for I/O. If we mark the
444 * buffers as we go, then we can end up with a page that only has buffers
445 * marked async write, and I/O completion can occur before we mark the other
446 * buffers async write.
448 * The end result of this is that we trip a bug in end_page_writeback() because
449 * we call it twice for the one page as the code in end_buffer_async_write()
450 * assumes that all buffers on the page are started at the same time.
452 * The fix is two passes across the ioend list - one to start writeback on the
453 * buffer_heads, and then submit them for I/O on the second pass.
455 * If @fail is non-zero, it means that we have a situation where some part of
456 * the submission process has failed after we have marked pages for writeback
457 * and unlocked them. In this situation, we need to fail the ioend chain rather
458 * than submit it to IO. This typically only happens on a filesystem shutdown.
462 struct writeback_control *wbc,
466 xfs_ioend_t *head = ioend;
468 struct buffer_head *bh;
470 sector_t lastblock = 0;
472 /* Pass 1 - start writeback */
474 next = ioend->io_list;
475 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
476 xfs_start_buffer_writeback(bh);
477 } while ((ioend = next) != NULL);
479 /* Pass 2 - submit I/O */
482 next = ioend->io_list;
486 * If we are failing the IO now, just mark the ioend with an
487 * error and finish it. This will run IO completion immediately
488 * as there is only one reference to the ioend at this point in
492 ioend->io_error = fail;
493 xfs_finish_ioend(ioend);
497 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
501 bio = xfs_alloc_ioend_bio(bh);
502 } else if (bh->b_blocknr != lastblock + 1) {
503 xfs_submit_ioend_bio(wbc, ioend, bio);
507 if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
508 xfs_submit_ioend_bio(wbc, ioend, bio);
512 lastblock = bh->b_blocknr;
515 xfs_submit_ioend_bio(wbc, ioend, bio);
516 xfs_finish_ioend(ioend);
517 } while ((ioend = next) != NULL);
521 * Cancel submission of all buffer_heads so far in this ioend.
522 * Toss the ioend too. Only ever called for the initial page
523 * in a writepage request, so only ever one page.
530 struct buffer_head *bh, *next_bh;
533 next = ioend->io_list;
534 bh = ioend->io_buffer_head;
536 next_bh = bh->b_private;
537 clear_buffer_async_write(bh);
539 * The unwritten flag is cleared when added to the
540 * ioend. We're not submitting for I/O so mark the
541 * buffer unwritten again for next time around.
543 if (ioend->io_type == XFS_IO_UNWRITTEN)
544 set_buffer_unwritten(bh);
546 } while ((bh = next_bh) != NULL);
548 mempool_free(ioend, xfs_ioend_pool);
549 } while ((ioend = next) != NULL);
553 * Test to see if we've been building up a completion structure for
554 * earlier buffers -- if so, we try to append to this ioend if we
555 * can, otherwise we finish off any current ioend and start another.
556 * Return true if we've finished the given ioend.
561 struct buffer_head *bh,
564 xfs_ioend_t **result,
567 xfs_ioend_t *ioend = *result;
569 if (!ioend || need_ioend || type != ioend->io_type) {
570 xfs_ioend_t *previous = *result;
572 ioend = xfs_alloc_ioend(inode, type);
573 ioend->io_offset = offset;
574 ioend->io_buffer_head = bh;
575 ioend->io_buffer_tail = bh;
577 previous->io_list = ioend;
580 ioend->io_buffer_tail->b_private = bh;
581 ioend->io_buffer_tail = bh;
584 bh->b_private = NULL;
585 ioend->io_size += bh->b_size;
591 struct buffer_head *bh,
592 struct xfs_bmbt_irec *imap,
596 struct xfs_mount *m = XFS_I(inode)->i_mount;
597 xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
598 xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
600 ASSERT(imap->br_startblock != HOLESTARTBLOCK);
601 ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
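	/*
	 * Convert the mapping's start from 512-byte sectors (BBSHIFT) to
	 * filesystem blocks, then add the block offset of @offset within
	 * the mapping to get this buffer's block number.
	 */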
603 bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
604 ((offset - iomap_offset) >> inode->i_blkbits);
606 ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
609 set_buffer_mapped(bh);
615 struct buffer_head *bh,
616 struct xfs_bmbt_irec *imap,
619 ASSERT(imap->br_startblock != HOLESTARTBLOCK);
620 ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
622 xfs_map_buffer(inode, bh, imap, offset);
623 set_buffer_mapped(bh);
624 clear_buffer_delay(bh);
625 clear_buffer_unwritten(bh);
629 * Test if a given page contains at least one buffer of a given @type.
630 * If @check_all_buffers is true, then we walk all the buffers in the page to
631 * try to find one of the type passed in. If it is not set, then the caller only
632 * needs to check the first buffer on the page for a match.
638 bool check_all_buffers)
640 struct buffer_head *bh;
641 struct buffer_head *head;
643 if (PageWriteback(page))
647 if (!page_has_buffers(page))
650 bh = head = page_buffers(page);
652 if (buffer_unwritten(bh)) {
653 if (type == XFS_IO_UNWRITTEN)
655 } else if (buffer_delay(bh)) {
656 if (type == XFS_IO_DELALLOC)
658 } else if (buffer_dirty(bh) && buffer_mapped(bh)) {
659 if (type == XFS_IO_OVERWRITE)
663 /* If we are only checking the first buffer, we are done now. */
664 if (!check_all_buffers)
666 } while ((bh = bh->b_this_page) != head);
672 * Allocate & map buffers for page given the extent map. Write it out.
673 * Except for the original page of a writepage, this is called on
674 * delalloc/unwritten pages only; for the original page it is possible
675 * that the page has no mapping at all.
682 struct xfs_bmbt_irec *imap,
683 xfs_ioend_t **ioendp,
684 struct writeback_control *wbc)
686 struct buffer_head *bh, *head;
687 xfs_off_t end_offset;
688 unsigned long p_offset;
691 int count = 0, done = 0, uptodate = 1;
692 xfs_off_t offset = page_offset(page);
694 if (page->index != tindex)
696 if (!trylock_page(page))
698 if (PageWriteback(page))
699 goto fail_unlock_page;
700 if (page->mapping != inode->i_mapping)
701 goto fail_unlock_page;
702 if (!xfs_check_page_type(page, (*ioendp)->io_type, false))
703 goto fail_unlock_page;
706 * page_dirty is initially a count of buffers on the page before
707 * EOF and is decremented as we move each into a cleanable state.
711 * End offset is the highest offset that this page should represent.
712 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
713 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
714 * hence give us the correct page_dirty count. On any other page,
715 * it will be zero and in that case we need page_dirty to be the
716 * count of buffers on the page.
718 end_offset = min_t(unsigned long long,
719 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
723 * If the current map does not span the entire page we are about to try
724 * to write, then give up. The only way we can write a page that spans
725 * multiple mappings in a single writeback iteration is via the
726 * xfs_vm_writepage() function. Data integrity writeback requires the
727 * entire page to be written in a single attempt, otherwise the part of
728 * the page we don't write here doesn't get written as part of the data
731 * For normal writeback, we also don't attempt to write partial pages
732 * here as it simply means that write_cache_pages() will see it under
733 * writeback and ignore the page until some point in the future, at
734 * which time this will be the only page in the file that needs
735 * writeback. Hence for more optimal IO patterns, we should always
736 * avoid partial page writeback due to multiple mappings on a page here.
738 if (!xfs_imap_valid(inode, imap, end_offset))
739 goto fail_unlock_page;
741 len = 1 << inode->i_blkbits;
742 p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
744 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
745 page_dirty = p_offset / len;
748 * The moment we find a buffer that doesn't match our current type
749 * specification or can't be written, abort the loop and start
750 * writeback. As per the above xfs_imap_valid() check, only
751 * xfs_vm_writepage() can handle partial page writeback fully - we are
752 * limited here to the buffers that are contiguous with the current
753 * ioend, and hence a buffer we can't write breaks that contiguity and
754 * we have to defer the rest of the IO to xfs_vm_writepage().
756 bh = head = page_buffers(page);
758 if (offset >= end_offset)
760 if (!buffer_uptodate(bh))
762 if (!(PageUptodate(page) || buffer_uptodate(bh))) {
767 if (buffer_unwritten(bh) || buffer_delay(bh) ||
769 if (buffer_unwritten(bh))
770 type = XFS_IO_UNWRITTEN;
771 else if (buffer_delay(bh))
772 type = XFS_IO_DELALLOC;
774 type = XFS_IO_OVERWRITE;
777 * imap should always be valid because of the above
778 * partial page end_offset check on the imap.
780 ASSERT(xfs_imap_valid(inode, imap, offset));
783 if (type != XFS_IO_OVERWRITE)
784 xfs_map_at_offset(inode, bh, imap, offset);
785 xfs_add_to_ioend(inode, bh, offset, type,
794 } while (offset += len, (bh = bh->b_this_page) != head);
796 if (uptodate && bh == head)
797 SetPageUptodate(page);
800 if (--wbc->nr_to_write <= 0 &&
801 wbc->sync_mode == WB_SYNC_NONE)
804 xfs_start_page_writeback(page, !page_dirty, count);
814 * Convert & write out a cluster of pages in the same extent as defined
815 * by imap and following the start page.
821 struct xfs_bmbt_irec *imap,
822 xfs_ioend_t **ioendp,
823 struct writeback_control *wbc,
829 pagevec_init(&pvec, 0);
830 while (!done && tindex <= tlast) {
831 unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
833 if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
836 for (i = 0; i < pagevec_count(&pvec); i++) {
837 done = xfs_convert_page(inode, pvec.pages[i], tindex++,
843 pagevec_release(&pvec);
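/*
 * Invalidate (part of) a page: trace the call and let block_invalidatepage()
 * tear down the buffer_heads over the given range.
 */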
849 xfs_vm_invalidatepage(
854 trace_xfs_invalidatepage(page->mapping->host, page, offset,
856 block_invalidatepage(page, offset, length);
860 * If the page has delalloc buffers on it, we need to punch them out before we
861 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
862 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
863 * is done on that same region - the delalloc extent is returned when none is
864 * supposed to be there.
866 * We prevent this by truncating away the delalloc regions on the page before
867 * invalidating it. Because they are delalloc, we can do this without needing a
868 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
869 * truncation without a transaction as there is no space left for block
870 * reservation (typically why we see an ENOSPC in writeback).
872 * This is not a performance critical path, so for now just do the punching a
873 * buffer head at a time.
876 xfs_aops_discard_page(
879 struct inode *inode = page->mapping->host;
880 struct xfs_inode *ip = XFS_I(inode);
881 struct buffer_head *bh, *head;
882 loff_t offset = page_offset(page);
884 if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
887 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
890 xfs_alert(ip->i_mount,
891 "page discard on page %p, inode 0x%llx, offset %llu.",
892 page, ip->i_ino, offset);
894 xfs_ilock(ip, XFS_ILOCK_EXCL);
895 bh = head = page_buffers(page);
898 xfs_fileoff_t start_fsb;
900 if (!buffer_delay(bh))
903 start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
904 error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
906 /* something screwed, just bail */
907 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
908 xfs_alert(ip->i_mount,
909 "page discard unable to remove delalloc mapping.");
914 offset += 1 << inode->i_blkbits;
916 } while ((bh = bh->b_this_page) != head);
918 xfs_iunlock(ip, XFS_ILOCK_EXCL);
920 xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
925 * Write out a dirty page.
927 * For delalloc space on the page we need to allocate space and flush it.
928 * For unwritten space on the page we need to start the conversion to
929 * regular allocated space.
930 * For any other dirty buffer heads on the page we should flush them.
935 struct writeback_control *wbc)
937 struct inode *inode = page->mapping->host;
938 struct buffer_head *bh, *head;
939 struct xfs_bmbt_irec imap;
940 xfs_ioend_t *ioend = NULL, *iohead = NULL;
943 __uint64_t end_offset;
944 pgoff_t end_index, last_index;
946 int err, imap_valid = 0, uptodate = 1;
950 trace_xfs_writepage(inode, page, 0, 0);
952 ASSERT(page_has_buffers(page));
955 * Refuse to write the page out if we are called from reclaim context.
957 * This avoids stack overflows when called from deeply used stacks in
958 * random callers for direct reclaim or memcg reclaim. We explicitly
959 * allow reclaim from kswapd as the stack usage there is relatively low.
961 * This should never happen except in the case of a VM regression so
964 if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
969 * Given that we do not allow direct reclaim to call us, we should
970 * never be called while in a filesystem transaction.
972 if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
975 /* Is this page beyond the end of the file? */
976 offset = i_size_read(inode);
977 end_index = offset >> PAGE_CACHE_SHIFT;
978 last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
981 * The page index is less than the end_index, adjust the end_offset
982 * to the highest offset that this page should represent.
983 * -----------------------------------------------------
984 * | file mapping | <EOF> |
985 * -----------------------------------------------------
986 * | Page ... | Page N-2 | Page N-1 | Page N | |
987 * ^--------------------------------^----------|--------
988 * | desired writeback range | see else |
989 * ---------------------------------^------------------|
991 if (page->index < end_index)
992 end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
995 * Check whether the page to write out is beyond or straddles
997 * -------------------------------------------------------
998 * | file mapping | <EOF> |
999 * -------------------------------------------------------
1000 * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
1001 * ^--------------------------------^-----------|---------
1003 * ---------------------------------^-----------|--------|
1005 unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
1008 * Skip the page if it is fully outside i_size, e.g. due to a
1009 * truncate operation that is in progress. We must redirty the
1010 * page so that reclaim stops reclaiming it. Otherwise
1011 * xfs_vm_releasepage() is called on it and gets confused.
1013 * Note that the end_index is unsigned long, it would overflow
1014 * if the given offset is greater than 16TB on 32-bit system
1015 * and if we do check the page is fully outside i_size or not
1016 * via "if (page->index >= end_index + 1)" as "end_index + 1"
1017 * will be evaluated to 0. Hence this page will be redirtied
1018 * and be written out repeatedly which would result in an
1019 * infinite loop and the user program that performs this operation
1020 * will hang. Instead, we can verify this situation by checking
1021 * if the page to write is totally beyond the i_size or if its
1022 * offset is just equal to the EOF.
1024 if (page->index > end_index ||
1025 (page->index == end_index && offset_into_page == 0))
1029 * The page straddles i_size. It must be zeroed out on each
1030 * and every writepage invocation because it may be mmapped.
1031 * "A file is mapped in multiples of the page size. For a file
1032 * that is not a multiple of the page size, the remaining
1033 * memory is zeroed when mapped, and writes to that region are
1034 * not written out to the file."
1036 zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
1038 /* Adjust the end_offset to the end of file */
1039 end_offset = offset;
1042 len = 1 << inode->i_blkbits;
1044 bh = head = page_buffers(page);
1045 offset = page_offset(page);
1046 type = XFS_IO_OVERWRITE;
1048 if (wbc->sync_mode == WB_SYNC_NONE)
1054 if (offset >= end_offset)
1056 if (!buffer_uptodate(bh))
1060 * set_page_dirty dirties all buffers in a page, independent
1061 * of their state. The dirty state however is entirely
1062 * meaningless for holes (!mapped && uptodate), so skip
1063 * buffers covering holes here.
1065 if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
1070 if (buffer_unwritten(bh)) {
1071 if (type != XFS_IO_UNWRITTEN) {
1072 type = XFS_IO_UNWRITTEN;
1075 } else if (buffer_delay(bh)) {
1076 if (type != XFS_IO_DELALLOC) {
1077 type = XFS_IO_DELALLOC;
1080 } else if (buffer_uptodate(bh)) {
1081 if (type != XFS_IO_OVERWRITE) {
1082 type = XFS_IO_OVERWRITE;
1086 if (PageUptodate(page))
1087 ASSERT(buffer_mapped(bh));
1089 * This buffer is not uptodate and will not be
1090 * written to disk. Ensure that we will put any
1091 * subsequent writeable buffers into a new
1099 imap_valid = xfs_imap_valid(inode, &imap, offset);
1102 * If we didn't have a valid mapping then we need to
1103 * put the new mapping into a separate ioend structure.
1104 * This ensures non-contiguous extents always have
1105 * separate ioends, which is particularly important
1106 * for unwritten extent conversion at I/O completion
1110 err = xfs_map_blocks(inode, offset, &imap, type,
1114 imap_valid = xfs_imap_valid(inode, &imap, offset);
1118 if (type != XFS_IO_OVERWRITE)
1119 xfs_map_at_offset(inode, bh, &imap, offset);
1120 xfs_add_to_ioend(inode, bh, offset, type, &ioend,
1128 } while (offset += len, ((bh = bh->b_this_page) != head));
1130 if (uptodate && bh == head)
1131 SetPageUptodate(page);
1133 xfs_start_page_writeback(page, 1, count);
1135 /* if there is no IO to be submitted for this page, we are done */
1142 * Any errors from this point onwards need to be reported through the IO
1143 * completion path as we have marked the initial page as under writeback
1147 xfs_off_t end_index;
1149 end_index = imap.br_startoff + imap.br_blockcount;
1152 end_index <<= inode->i_blkbits;
1155 end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;
1157 /* check against file size */
1158 if (end_index > last_index)
1159 end_index = last_index;
1161 xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
1167 * Reserve log space if we might write beyond the on-disk inode size.
1170 if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
1171 err = xfs_setfilesize_trans_alloc(ioend);
1173 xfs_submit_ioend(wbc, iohead, err);
1179 xfs_cancel_ioend(iohead);
1184 xfs_aops_discard_page(page);
1185 ClearPageUptodate(page);
1190 redirty_page_for_writepage(wbc, page);
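/*
 * xfs_vm_writepages() (wired up as ->writepages below): clear the
 * XFS_ITRUNCATED flag and let generic_writepages() drive ->writepage for
 * each dirty page.
 */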
1197 struct address_space *mapping,
1198 struct writeback_control *wbc)
1200 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1201 return generic_writepages(mapping, wbc);
1205 * Called to move a page into cleanable state - and from there
1206 * to be released. The page should already be clean. We always
1207 * have buffer heads in this call.
1209 * Returns 1 if the page is ok to release, 0 otherwise.
1216 int delalloc, unwritten;
1218 trace_xfs_releasepage(page->mapping->host, page, 0, 0);
1220 xfs_count_page_state(page, &delalloc, &unwritten);
1222 if (WARN_ON_ONCE(delalloc))
1224 if (WARN_ON_ONCE(unwritten))
1227 return try_to_free_buffers(page);
1231 * When we map a DIO buffer, we may need to attach an ioend that describes the
1232 * type of write IO we are doing. This passes to the completion function the
1233 * operations it needs to perform. If the mapping is for an overwrite wholly
1234 * within the EOF then we don't need an ioend and so we don't allocate one.
1235 * This avoids the unnecessary overhead of allocating and freeing ioends for
1236 * workloads that don't require transactions on IO completion.
1238 * If we get multiple mappings in a single IO, we might be mapping different
1239 * types. But because the direct IO can only have a single private pointer, we
1240 * need to ensure that:
1242 * a) i) the ioend spans the entire region of unwritten mappings; or
1243 * ii) the ioend spans all the mappings that cross or are beyond EOF; and
1244 * b) if it contains unwritten extents, it is *permanently* marked as such
1246 * We could do this by chaining ioends like buffered IO does, but we only
1247 * actually get one IO completion callback from the direct IO, and that spans
1248 * the entire IO regardless of how many mappings and IOs are needed to complete
1249 * the DIO. There is only going to be one reference to the ioend and its life
1250 * cycle is constrained by the DIO completion code. Hence we don't need
1251 * reference counting here.
1255 struct inode *inode,
1256 struct buffer_head *bh_result,
1257 struct xfs_bmbt_irec *imap,
1260 struct xfs_ioend *ioend;
1261 xfs_off_t size = bh_result->b_size;
1264 if (ISUNWRITTEN(imap))
1265 type = XFS_IO_UNWRITTEN;
1267 type = XFS_IO_OVERWRITE;
1269 trace_xfs_gbmap_direct(XFS_I(inode), offset, size, type, imap);
1271 if (bh_result->b_private) {
1272 ioend = bh_result->b_private;
1273 ASSERT(ioend->io_size > 0);
1274 ASSERT(offset >= ioend->io_offset);
1275 if (offset + size > ioend->io_offset + ioend->io_size)
1276 ioend->io_size = offset - ioend->io_offset + size;
1278 if (type == XFS_IO_UNWRITTEN && type != ioend->io_type)
1279 ioend->io_type = XFS_IO_UNWRITTEN;
1281 trace_xfs_gbmap_direct_update(XFS_I(inode), ioend->io_offset,
1282 ioend->io_size, ioend->io_type,
1284 } else if (type == XFS_IO_UNWRITTEN ||
1285 offset + size > i_size_read(inode)) {
1286 ioend = xfs_alloc_ioend(inode, type);
1287 ioend->io_offset = offset;
1288 ioend->io_size = size;
1290 bh_result->b_private = ioend;
1291 set_buffer_defer_completion(bh_result);
1293 trace_xfs_gbmap_direct_new(XFS_I(inode), offset, size, type,
1296 trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type,
1302 * If this is O_DIRECT or the mpage code calling, tell them how large the
1303 * mapping is so that we can avoid repeated get_blocks calls.
1305 * If the mapping spans EOF, then we have to break the mapping up as the mapping
1306 * for blocks beyond EOF must be marked new so that sub block regions can be
1307 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
1308 * was just allocated or is unwritten, otherwise the callers would overwrite
1309 * existing data with zeros. Hence we have to split the mapping into a range up
1310 * to and including EOF, and a second mapping for beyond EOF.
1314 struct inode *inode,
1316 struct buffer_head *bh_result,
1317 struct xfs_bmbt_irec *imap,
1321 xfs_off_t mapping_size;
1323 mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
1324 mapping_size <<= inode->i_blkbits;
1326 ASSERT(mapping_size > 0);
1327 if (mapping_size > size)
1328 mapping_size = size;
1329 if (offset < i_size_read(inode) &&
1330 offset + mapping_size >= i_size_read(inode)) {
1331 /* limit mapping to block that spans EOF */
1332 mapping_size = roundup_64(i_size_read(inode) - offset,
1333 1 << inode->i_blkbits);
1335 if (mapping_size > LONG_MAX)
1336 mapping_size = LONG_MAX;
1338 bh_result->b_size = mapping_size;
1343 struct inode *inode,
1345 struct buffer_head *bh_result,
1349 struct xfs_inode *ip = XFS_I(inode);
1350 struct xfs_mount *mp = ip->i_mount;
1351 xfs_fileoff_t offset_fsb, end_fsb;
1354 struct xfs_bmbt_irec imap;
1360 if (XFS_FORCED_SHUTDOWN(mp))
1363 offset = (xfs_off_t)iblock << inode->i_blkbits;
1364 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1365 size = bh_result->b_size;
1367 if (!create && direct && offset >= i_size_read(inode))
1371 * Direct I/O is usually done on preallocated files, so try getting
1372 * a block mapping without an exclusive lock first. For buffered
1373 * writes we already have the exclusive iolock anyway, so avoiding
1374 * a lock roundtrip here by taking the ilock exclusive from the
1375 * beginning is a useful micro optimization.
1377 if (create && !direct) {
1378 lockmode = XFS_ILOCK_EXCL;
1379 xfs_ilock(ip, lockmode);
1381 lockmode = xfs_ilock_data_map_shared(ip);
1384 ASSERT(offset <= mp->m_super->s_maxbytes);
1385 if (offset + size > mp->m_super->s_maxbytes)
1386 size = mp->m_super->s_maxbytes - offset;
1387 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1388 offset_fsb = XFS_B_TO_FSBT(mp, offset);
1390 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
1391 &imap, &nimaps, XFS_BMAPI_ENTIRE);
1397 (imap.br_startblock == HOLESTARTBLOCK ||
1398 imap.br_startblock == DELAYSTARTBLOCK))) {
1399 if (direct || xfs_get_extsz_hint(ip)) {
1401 * Drop the ilock in preparation for starting the block
1402 * allocation transaction. It will be retaken
1403 * exclusively inside xfs_iomap_write_direct for the
1404 * actual allocation.
1406 xfs_iunlock(ip, lockmode);
1407 error = xfs_iomap_write_direct(ip, offset, size,
1415 * Delalloc reservations do not require a transaction,
1416 * we can go on without dropping the lock here. If we
1417 * are allocating a new delalloc block, make sure that
1418 * we set the new flag so that we mark the buffer new so
1419 * that we know that it is newly allocated if the write
1422 if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
1424 error = xfs_iomap_write_delay(ip, offset, size, &imap);
1428 xfs_iunlock(ip, lockmode);
1430 trace_xfs_get_blocks_alloc(ip, offset, size,
1431 ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
1432 : XFS_IO_DELALLOC, &imap);
1433 } else if (nimaps) {
1434 trace_xfs_get_blocks_found(ip, offset, size,
1435 ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
1436 : XFS_IO_OVERWRITE, &imap);
1437 xfs_iunlock(ip, lockmode);
1439 trace_xfs_get_blocks_notfound(ip, offset, size);
1443 /* trim mapping down to size requested */
1444 if (direct || size > (1 << inode->i_blkbits))
1445 xfs_map_trim_size(inode, iblock, bh_result,
1446 &imap, offset, size);
1449 * For unwritten extents do not report a disk address in the buffered
1450 * read case (treat as if we're reading into a hole).
1452 if (imap.br_startblock != HOLESTARTBLOCK &&
1453 imap.br_startblock != DELAYSTARTBLOCK &&
1454 (create || !ISUNWRITTEN(&imap))) {
1455 xfs_map_buffer(inode, bh_result, &imap, offset);
1456 if (ISUNWRITTEN(&imap))
1457 set_buffer_unwritten(bh_result);
1458 /* direct IO needs special help */
1459 if (create && direct)
1460 xfs_map_direct(inode, bh_result, &imap, offset);
1464 * If this is a realtime file, data may be on a different device
1465 * to that pointed to from the buffer_head b_bdev currently.
1467 bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
1470 * If we previously allocated a block out beyond eof and we are now
1471 * coming back to use it then we will need to flag it as new even if it
1472 * has a disk address.
1474 * With sub-block writes into unwritten extents we also need to mark
1475 * the buffer as new so that the unwritten parts of the buffer gets
1479 ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
1480 (offset >= i_size_read(inode)) ||
1481 (new || ISUNWRITTEN(&imap))))
1482 set_buffer_new(bh_result);
1484 if (imap.br_startblock == DELAYSTARTBLOCK) {
1487 set_buffer_uptodate(bh_result);
1488 set_buffer_mapped(bh_result);
1489 set_buffer_delay(bh_result);
1496 xfs_iunlock(ip, lockmode);
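/*
 * Thin wrappers around __xfs_get_blocks(): the buffered variant
 * (xfs_get_blocks) passes direct = false, while xfs_get_blocks_direct()
 * passes direct = true for the direct I/O path.
 */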
1502 struct inode *inode,
1504 struct buffer_head *bh_result,
1507 return __xfs_get_blocks(inode, iblock, bh_result, create, false);
1511 xfs_get_blocks_direct(
1512 struct inode *inode,
1514 struct buffer_head *bh_result,
1517 return __xfs_get_blocks(inode, iblock, bh_result, create, true);
1521 __xfs_end_io_direct_write(
1522 struct inode *inode,
1523 struct xfs_ioend *ioend,
1527 struct xfs_mount *mp = XFS_I(inode)->i_mount;
1529 if (XFS_FORCED_SHUTDOWN(mp) || ioend->io_error)
1533 * dio completion end_io functions are only called on writes if more
1534 * than 0 bytes were written.
1539 * The ioend only maps whole blocks, while the IO may be sector aligned.
1540 * Hence the ioend offset/size may not match the IO offset/size exactly.
1541 * Because we don't map overwrites within EOF into the ioend, the offset
1542 * may not match, but only if the endio spans EOF. Either way, write
1543 * the IO sizes into the ioend so that completion processing does the
1546 ASSERT(offset + size <= ioend->io_offset + ioend->io_size);
1547 ioend->io_size = size;
1548 ioend->io_offset = offset;
1551 * The ioend tells us whether we are doing unwritten extent conversion
1552 * or an append transaction that updates the on-disk file size. These
1553 * cases are the only cases where we should *potentially* be needing
1554 * to update the VFS inode size.
1556 * We need to update the in-core inode size here so that we don't end up
1557 * with the on-disk inode size being outside the in-core inode size. We
1558 * have no other method of updating EOF for AIO, so always do it here
1561 * We need to lock the test/set EOF update as we can be racing with
1562 * other IO completions here to update the EOF. Failing to serialise
1563 * here can result in EOF moving backwards and Bad Things Happen when
1566 spin_lock(&XFS_I(inode)->i_flags_lock);
1567 if (offset + size > i_size_read(inode))
1568 i_size_write(inode, offset + size);
1569 spin_unlock(&XFS_I(inode)->i_flags_lock);
1572 * If we are doing an append IO that needs to update the EOF on disk,
1573 * do the transaction reserve now so we can use common end io
1574 * processing. Stashing the error (if there is one) in the ioend will
1575 * result in the ioend processing passing on the error if it is
1576 * possible as we can't return it from here.
1578 if (ioend->io_type == XFS_IO_OVERWRITE)
1579 ioend->io_error = xfs_setfilesize_trans_alloc(ioend);
1582 xfs_end_io(&ioend->io_work);
1587 * Complete a direct I/O write request.
1589 * The ioend structure is passed from __xfs_get_blocks() to tell us what to do.
1590 * If no ioend exists (i.e. @private == NULL) then the write IO is an overwrite
1591 * wholly within the EOF and so there is nothing for us to do. Note that in this
1592 * case the completion can be called in interrupt context, whereas if we have an
1593 * ioend we will always be called in task context (i.e. from a workqueue).
1596 xfs_end_io_direct_write(
1602 struct inode *inode = file_inode(iocb->ki_filp);
1603 struct xfs_ioend *ioend = private;
1605 trace_xfs_gbmap_direct_endio(XFS_I(inode), offset, size,
1606 ioend ? ioend->io_type : 0, NULL);
1609 ASSERT(offset + size <= i_size_read(inode));
1613 __xfs_end_io_direct_write(inode, ioend, offset, size);
1617 * For DAX we need a mapping buffer callback for unwritten extent conversion
1618 * when page faults allocate blocks and then zero them. Note that in this
1619 * case the mapping indicated by the ioend may extend beyond EOF. We most
1620 * definitely do not want to extend EOF here, so we trim back the ioend size to
1623 #ifdef CONFIG_FS_DAX
1625 xfs_end_io_dax_write(
1626 struct buffer_head *bh,
1629 struct xfs_ioend *ioend = bh->b_private;
1630 struct inode *inode = ioend->io_inode;
1631 ssize_t size = ioend->io_size;
1633 ASSERT(IS_DAX(ioend->io_inode));
1635 /* if there was an error zeroing, then don't convert it */
1637 ioend->io_error = -EIO;
1640 * Trim update to EOF, so we don't extend EOF during unwritten extent
1641 * conversion of partial EOF blocks.
1643 spin_lock(&XFS_I(inode)->i_flags_lock);
1644 if (ioend->io_offset + size > i_size_read(inode))
1645 size = i_size_read(inode) - ioend->io_offset;
1646 spin_unlock(&XFS_I(inode)->i_flags_lock);
1648 __xfs_end_io_direct_write(inode, ioend, ioend->io_offset, size);
1652 void xfs_end_io_dax_write(struct buffer_head *bh, int uptodate) { }
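/*
 * xfs_vm_do_dio(): dispatch a direct I/O request through dax_do_io() for
 * DAX inodes, or otherwise through __blockdev_direct_IO() against the
 * block device backing the inode.
 */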
1655 static inline ssize_t
1657 struct inode *inode,
1659 struct iov_iter *iter,
1661 void (*endio)(struct kiocb *iocb,
1667 struct block_device *bdev;
1670 return dax_do_io(iocb, inode, iter, offset,
1671 xfs_get_blocks_direct, endio, 0);
1673 bdev = xfs_find_bdev_for_inode(inode);
1674 return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
1675 xfs_get_blocks_direct, endio, NULL, flags);
1681 struct iov_iter *iter,
1684 struct inode *inode = iocb->ki_filp->f_mapping->host;
1686 if (iov_iter_rw(iter) == WRITE)
1687 return xfs_vm_do_dio(inode, iocb, iter, offset,
1688 xfs_end_io_direct_write, DIO_ASYNC_EXTEND);
1689 return xfs_vm_do_dio(inode, iocb, iter, offset, NULL, 0);
1693 * Punch out the delalloc blocks we have already allocated.
1695 * Don't bother with xfs_setattr given that nothing can have made it to disk yet
1696 * as the page is still locked at this point.
1699 xfs_vm_kill_delalloc_range(
1700 struct inode *inode,
1704 struct xfs_inode *ip = XFS_I(inode);
1705 xfs_fileoff_t start_fsb;
1706 xfs_fileoff_t end_fsb;
1709 start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
1710 end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
1711 if (end_fsb <= start_fsb)
1714 xfs_ilock(ip, XFS_ILOCK_EXCL);
1715 error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
1716 end_fsb - start_fsb);
1718 /* something screwed, just bail */
1719 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1720 xfs_alert(ip->i_mount,
1721 "xfs_vm_write_failed: unable to clean up ino %lld",
1725 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1729 xfs_vm_write_failed(
1730 struct inode *inode,
1735 loff_t block_offset;
1738 loff_t from = pos & (PAGE_CACHE_SIZE - 1);
1739 loff_t to = from + len;
1740 struct buffer_head *bh, *head;
1743 * The request pos offset might be 32 or 64 bit; this is all fine
1744 * on a 64-bit platform. However, for a 64-bit pos request on a 32-bit
1745 * platform, the high 32 bits will be masked off if we evaluate the
1746 * block_offset via (pos & PAGE_MASK) because PAGE_MASK is
1747 * 0xfffff000 as an unsigned long, hence the result is incorrect
1748 * and could cause the following ASSERT to fail in most cases.
1749 * In order to avoid this, we evaluate the block_offset of the
1750 * start of the page using shifts rather than masks.
1753 block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;
1755 ASSERT(block_offset + from == pos);
1757 head = page_buffers(page);
1759 for (bh = head; bh != head || !block_start;
1760 bh = bh->b_this_page, block_start = block_end,
1761 block_offset += bh->b_size) {
1762 block_end = block_start + bh->b_size;
1764 /* skip buffers before the write */
1765 if (block_end <= from)
1768 /* if the buffer is after the write, we're done */
1769 if (block_start >= to)
1772 if (!buffer_delay(bh))
1775 if (!buffer_new(bh) && block_offset < i_size_read(inode))
1778 xfs_vm_kill_delalloc_range(inode, block_offset,
1779 block_offset + bh->b_size);
1782 * This buffer does not contain data anymore. Make sure anyone
1783 * who finds it knows that for certain.
1785 clear_buffer_delay(bh);
1786 clear_buffer_uptodate(bh);
1787 clear_buffer_mapped(bh);
1788 clear_buffer_new(bh);
1789 clear_buffer_dirty(bh);
1795 * This used to call block_write_begin(), but it unlocks and releases the page
1796 * on error, and we need that page to be able to punch stale delalloc blocks out
1797 * on failure. Hence we copy-n-waste it here and call xfs_vm_write_failed() at
1798 * the appropriate point.
1803 struct address_space *mapping,
1807 struct page **pagep,
1810 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1814 ASSERT(len <= PAGE_CACHE_SIZE);
1816 page = grab_cache_page_write_begin(mapping, index, flags);
1820 status = __block_write_begin(page, pos, len, xfs_get_blocks);
1821 if (unlikely(status)) {
1822 struct inode *inode = mapping->host;
1823 size_t isize = i_size_read(inode);
1825 xfs_vm_write_failed(inode, page, pos, len);
1829 * If the write is beyond EOF, we only want to kill blocks
1830 * allocated in this write, not blocks that were previously
1831 * written successfully.
1833 if (pos + len > isize) {
1834 ssize_t start = max_t(ssize_t, pos, isize);
1836 truncate_pagecache_range(inode, start, pos + len);
1839 page_cache_release(page);
1848 * On failure, we only need to kill delalloc blocks beyond EOF in the range of
1849 * this specific write because they will never be written. Previous writes
1850 * beyond EOF where block allocation succeeded do not need to be trashed, so
1851 * only new blocks from this write should be trashed. For blocks within
1852 * EOF, generic_write_end() zeros them so they are safe to leave alone and be
1853 * written with all the other valid data.
1858 struct address_space *mapping,
1867 ASSERT(len <= PAGE_CACHE_SIZE);
1869 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
1870 if (unlikely(ret < len)) {
1871 struct inode *inode = mapping->host;
1872 size_t isize = i_size_read(inode);
1873 loff_t to = pos + len;
1876 /* only kill blocks in this write beyond EOF */
1879 xfs_vm_kill_delalloc_range(inode, isize, to);
1880 truncate_pagecache_range(inode, isize, to);
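/*
 * xfs_vm_bmap() (->bmap below): flush and wait for dirty data while holding
 * the shared iolock so the block mapping is stable, then let
 * generic_block_bmap() do the FIBMAP lookup via xfs_get_blocks.
 */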
1888 struct address_space *mapping,
1891 struct inode *inode = (struct inode *)mapping->host;
1892 struct xfs_inode *ip = XFS_I(inode);
1894 trace_xfs_vm_bmap(XFS_I(inode));
1895 xfs_ilock(ip, XFS_IOLOCK_SHARED);
1896 filemap_write_and_wait(mapping);
1897 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
1898 return generic_block_bmap(mapping, block, xfs_get_blocks);
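/*
 * The buffered read paths are simple wrappers around the generic mpage
 * helpers, using xfs_get_blocks to map each block.
 */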
1903 struct file *unused,
1906 return mpage_readpage(page, xfs_get_blocks);
1911 struct file *unused,
1912 struct address_space *mapping,
1913 struct list_head *pages,
1916 return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
1920 * This is basically a copy of __set_page_dirty_buffers() with one
1921 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
1922 * dirty, we'll never be able to clean them because we don't write buffers
1923 * beyond EOF, and that means we can't invalidate pages that span EOF
1924 * that have been marked dirty. Further, the dirty state can leak into
1925 * the file interior if the file is extended, resulting in all sorts of
1926 * bad things happening as the state does not match the underlying data.
1928 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
1929 * this only exist because of bufferheads and how the generic code manages them.
1932 xfs_vm_set_page_dirty(
1935 struct address_space *mapping = page->mapping;
1936 struct inode *inode = mapping->host;
1940 struct mem_cgroup *memcg;
1942 if (unlikely(!mapping))
1943 return !TestSetPageDirty(page);
1945 end_offset = i_size_read(inode);
1946 offset = page_offset(page);
1948 spin_lock(&mapping->private_lock);
1949 if (page_has_buffers(page)) {
1950 struct buffer_head *head = page_buffers(page);
1951 struct buffer_head *bh = head;
1954 if (offset < end_offset)
1955 set_buffer_dirty(bh);
1956 bh = bh->b_this_page;
1957 offset += 1 << inode->i_blkbits;
1958 } while (bh != head);
1961 * Use mem_cgroup_begin_page_stat() to keep PageDirty synchronized with
1962 * per-memcg dirty page counters.
1964 memcg = mem_cgroup_begin_page_stat(page);
1965 newly_dirty = !TestSetPageDirty(page);
1966 spin_unlock(&mapping->private_lock);
1969 /* sigh - __set_page_dirty() is static, so copy it here, too */
1970 unsigned long flags;
1972 spin_lock_irqsave(&mapping->tree_lock, flags);
1973 if (page->mapping) { /* Race with truncate? */
1974 WARN_ON_ONCE(!PageUptodate(page));
1975 account_page_dirtied(page, mapping, memcg);
1976 radix_tree_tag_set(&mapping->page_tree,
1977 page_index(page), PAGECACHE_TAG_DIRTY);
1979 spin_unlock_irqrestore(&mapping->tree_lock, flags);
1981 mem_cgroup_end_page_stat(memcg);
1983 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1987 const struct address_space_operations xfs_address_space_operations = {
1988 .readpage = xfs_vm_readpage,
1989 .readpages = xfs_vm_readpages,
1990 .writepage = xfs_vm_writepage,
1991 .writepages = xfs_vm_writepages,
1992 .set_page_dirty = xfs_vm_set_page_dirty,
1993 .releasepage = xfs_vm_releasepage,
1994 .invalidatepage = xfs_vm_invalidatepage,
1995 .write_begin = xfs_vm_write_begin,
1996 .write_end = xfs_vm_write_end,
1997 .bmap = xfs_vm_bmap,
1998 .direct_IO = xfs_vm_direct_IO,
1999 .migratepage = buffer_migrate_page,
2000 .is_partially_uptodate = block_is_partially_uptodate,
2001 .error_remove_page = generic_error_remove_page,