/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}
/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	if (ioend->io_iocb) {
		if (ioend->io_isasync) {
			aio_complete(ioend->io_iocb, ioend->io_error ?
					ioend->io_error : ioend->io_result, 0);
		}
		inode_dio_done(ioend->io_inode);
	}

	mempool_free(ioend, xfs_ioend_pool);
}
/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}
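/*
 * Worked example (illustrative, not part of the original source): with an
 * on-disk di_size of 4096 bytes, an ioend with io_offset == 4096 and
 * io_size == 8192 covers bytes [4096, 12288) and is therefore an append
 * that needs an on-disk size update; an ioend entirely below 4096 is not.
 */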
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	ioend->io_append_trans = tp;

	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}
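/*
 * Illustrative note (not part of the original source): PF_FSTRANS travels
 * with the transaction here rather than with the task.  The submission
 * thread clears it after allocating the transaction, and the completion
 * worker re-asserts it before committing, e.g.:
 *
 *	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
 *	...
 *	error = xfs_trans_commit(tp, 0);
 *
 * as xfs_setfilesize() below does.
 */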
/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
xfs_setfilesize(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;
	xfs_fsize_t		isize;

	/*
	 * The transaction was allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction
	 * manually.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		return 0;
	}

	trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp, 0);
}
/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend directly.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans)
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IO_UNWRITTEN) {
		/*
		 * For buffered I/O we never preallocate a transaction when
		 * doing the unwritten extent conversion, but for direct I/O
		 * we do not know if we are converting an unwritten extent
		 * or not at the point where we preallocate the transaction.
		 */
		if (ioend->io_append_trans) {
			ASSERT(ioend->io_isdirect);

			current_set_flags_nested(
				&ioend->io_append_trans->t_pflags, PF_FSTRANS);
			xfs_trans_cancel(ioend->io_append_trans, 0);
		}

		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
		if (error) {
			ioend->io_error = -error;
			goto done;
		}
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize(ioend);
		if (error)
			ioend->io_error = -error;
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	xfs_destroy_ioend(ioend);
}
/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend_sync(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
}
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent the I/O
	 * completion callback from calling the completion routine
	 * before we have started all the I/O.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_isasync = 0;
	ioend->io_isdirect = 0;
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_iocb = NULL;
	ioend->io_result = 0;
	ioend->io_append_trans = NULL;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}
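/*
 * Illustrative sketch (not part of the original source) of how the
 * io_remaining reference count pairs up; the names are the ones used in
 * this file:
 *
 *	ioend = xfs_alloc_ioend(inode, type);	  io_remaining == 1
 *	xfs_submit_ioend_bio(wbc, ioend, bio);	  atomic_inc -> 2
 *	...each bio completion in xfs_end_bio() drops one reference...
 *	xfs_finish_ioend(ioend);		  drop the initial reference
 *
 * Only the final put queues the completion work (or destroys the ioend
 * directly when there is nothing left to do).
 */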
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (type == IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_maxioffset);

	if (offset + count > mp->m_maxioffset)
		count = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return -XFS_ERROR(error);

	if (type == IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	}

#ifdef DEBUG
	if (type == IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}
STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}
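/*
 * Worked example (illustrative, not part of the original source): with
 * 4096 byte blocks (i_blkbits == 12), a byte offset of 41000 becomes
 * FSB 10; a mapping with br_startoff == 8 and br_blockcount == 4 covers
 * FSBs [8, 12), so the offset is valid against that mapping.
 */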
/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we process them, we can end up with a page that only has some
 * buffers marked async write, and I/O completion can occur before we have
 * marked the remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}
/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}
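/*
 * Worked example (illustrative, not part of the original source): with
 * 4096 byte blocks (i_blkbits == 12) and 512 byte basic blocks
 * (BBSHIFT == 9), an extent starting at daddr 800 gives iomap_bn >> 3 ==
 * fs block 100; a buffer 8192 bytes into the extent adds 2, so
 * b_blocknr becomes 102, in units of the fs block size that bh->b_size
 * describes.
 */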
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}
/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IO_DELALLOC);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IO_OVERWRITE);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}
/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only, for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;
	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = IO_DELALLOC;
			else
				type = IO_OVERWRITE;

			if (!xfs_imap_valid(inode, imap, offset)) {
				done = 1;
				continue;
			}

			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}
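/*
 * Worked example for the page_dirty computation above (illustrative, not
 * part of the original source): with PAGE_CACHE_SIZE == 4096 and 512 byte
 * blocks (len == 512), an EOF page where end_offset & 4095 == 1500 gives
 * p_offset = roundup(1500, 512) = 1536 and page_dirty = 3; on any page
 * fully below EOF the masked value is 0, so p_offset = 4096 and
 * page_dirty = 8, the full count of buffers on the page.
 */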
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by imap and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
	}
}
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}
/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_is_delayed_page(page, IO_DELALLOC))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0);
	return;
}
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			unlock_page(page);
			return 0;
		}
	}

	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != IO_UNWRITTEN) {
				type = IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != IO_DELALLOC) {
				type = IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != IO_OVERWRITE) {
				type = IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page)) {
				ASSERT(buffer_mapped(bh));
				imap_valid = 0;
			}
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));
	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	if (ioend && imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;
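		/*
		 * Worked example (illustrative, not part of the original
		 * source): with 4096 byte blocks and 4096 byte pages, a
		 * mapping with br_startoff == 0 and br_blockcount == 100
		 * ends at byte 409600, so its last byte 409599 lands on
		 * page index 99; clustering stops there or at the last
		 * page under EOF, whichever is smaller.
		 */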
		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	if (iohead) {
		/*
		 * Reserve log space if we might write beyond the on-disk
		 * inode size.
		 */
		if (ioend->io_type != IO_UNWRITTEN &&
		    xfs_ioend_is_append(ioend)) {
			err = xfs_setfilesize_trans_alloc(ioend);
			if (err)
				goto error;
		}

		xfs_submit_ioend(wbc, iohead);
	}

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON(delalloc))
		return 0;
	if (WARN_ON(unwritten))
		return 0;

	return try_to_free_buffers(page);
}
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	if (create) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
	} else {
		lockmode = xfs_ilock_map_shared(ip);
	}

	ASSERT(offset <= mp->m_maxioffset);
	if (offset + size > mp->m_maxioffset)
		size = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK))) {
		if (direct) {
			error = xfs_iomap_write_direct(ip, offset, size,
						       &imap, nimaps);
		} else {
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
		}
		if (error)
			goto out_unlock;

		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}
	xfs_iunlock(ip, lockmode);
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to by the buffer_head's b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);
	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/*
	 * If this is O_DIRECT or the mpage code calling, tell them how large
	 * the mapping is, so that we can avoid repeated get_blocks calls.
	 */
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t		mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;

		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		bh_result->b_size = mapping_size;
	}
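	/*
	 * Worked example (illustrative, not part of the original source):
	 * with 4096 byte blocks, an extent covering FSBs [0, 100) and a
	 * request at iblock 10 leaves 90 blocks of mapping, i.e. 368640
	 * bytes; this is then clamped to the caller's requested size and
	 * to LONG_MAX before being reported back in bh_result->b_size.
	 */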
	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return -error;
}
STATIC int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}
/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL, __xfs_get_blocks signals us that we
 * need to issue a transaction to convert the range from unwritten to written
 * extents.  In case this is regular synchronous I/O we just call xfs_end_io
 * to do this and we are done.  But in case this was a successful AIO
 * request this handler is called from interrupt context, from which we
 * can't start transactions.  In that case offload the I/O completion to
 * the workqueues we also use for buffered I/O completion.
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private,
	int			ret,
	bool			is_async)
{
	struct xfs_ioend	*ioend = iocb->private;

	/*
	 * While the generic direct I/O code updates the inode size, it does
	 * so only after the end_io handler is called, which means our
	 * end_io handler thinks the on-disk size is outside the in-core
	 * size.  To prevent this just update it a little bit earlier here.
	 */
	if (offset + size > i_size_read(ioend->io_inode))
		i_size_write(ioend->io_inode, offset + size);

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;

	ioend->io_offset = offset;
	ioend->io_size = size;
	ioend->io_iocb = iocb;
	ioend->io_result = ret;
	if (private && size > 0)
		ioend->io_type = IO_UNWRITTEN;

	if (is_async) {
		ioend->io_isasync = 1;
		xfs_finish_ioend(ioend);
	} else {
		xfs_finish_ioend_sync(ioend);
	}
}
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	struct xfs_ioend	*ioend = NULL;
	ssize_t			ret;

	if (rw & WRITE) {
		size_t size = iov_length(iov, nr_segs);

		/*
		 * We need to preallocate a transaction for a size update
		 * here.  In the case that this write both updates the size
		 * and converts at least one unwritten extent we will cancel
		 * the still clean transaction after the I/O has finished.
		 */
		iocb->private = ioend = xfs_alloc_ioend(inode, IO_DIRECT);
		if (offset + size > XFS_I(inode)->i_d.di_size) {
			ret = xfs_setfilesize_trans_alloc(ioend);
			if (ret)
				goto out_destroy_ioend;
			ioend->io_isdirect = 1;
		}

		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   xfs_end_io_direct_write, NULL, 0);
		if (ret != -EIOCBQUEUED && iocb->private)
			goto out_trans_cancel;
	} else {
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   NULL, NULL, 0);
	}

	return ret;

out_trans_cancel:
	if (ioend->io_append_trans) {
		current_set_flags_nested(&ioend->io_append_trans->t_pflags,
					 PF_FSTRANS);
		xfs_trans_cancel(ioend->io_append_trans, 0);
	}
out_destroy_ioend:
	xfs_destroy_ioend(ioend);
	return ret;
}
STATIC void
xfs_vm_write_failed(
	struct address_space	*mapping,
	loff_t			to)
{
	struct inode		*inode = mapping->host;

	if (to > inode->i_size) {
		/*
		 * Punch out the delalloc blocks we have already allocated.
		 *
		 * Don't bother with xfs_setattr given that nothing can have
		 * made it to disk yet as the page is still locked at this
		 * point.
		 */
		struct xfs_inode	*ip = XFS_I(inode);
		xfs_fileoff_t		start_fsb;
		xfs_fileoff_t		end_fsb;
		int			error;

		truncate_pagecache(inode, to, inode->i_size);

		/*
		 * Check if there are any blocks that are outside of i_size
		 * that need to be trimmed back.
		 */
		start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1;
		end_fsb = XFS_B_TO_FSB(ip->i_mount, to);
		if (end_fsb <= start_fsb)
			return;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
							end_fsb - start_fsb);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"xfs_vm_write_failed: unable to clean up ino %lld",
						ip->i_ino);
			}
		}
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
}
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	int			ret;

	ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS,
				pagep, xfs_get_blocks);
	if (unlikely(ret))
		xfs_vm_write_failed(mapping, pos + len);
	return ret;
}
STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len))
		xfs_vm_write_failed(mapping, pos + len);
	return ret;
}
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};