/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
/*
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC		37
#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];

void __init
xfs_ioend_init(void)
{
	int i;

	for (i = 0; i < NVSYNC; i++)
		init_waitqueue_head(&xfs_ioend_wq[i]);
}

void
xfs_ioend_wait(
	xfs_inode_t	*ip)
{
	wait_queue_head_t *wq = to_ioend_wq(ip);

	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}

STATIC void
xfs_ioend_wake(
	xfs_inode_t	*ip)
{
	if (atomic_dec_and_test(&ip->i_iocount))
		wake_up(to_ioend_wq(ip));
}
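/*
 * Usage sketch (illustrative, not part of the original source): each
 * ioend pins the inode via i_iocount (the reference is taken in
 * xfs_alloc_ioend() below) and xfs_ioend_wake() drops it, so a caller
 * that must quiesce all in-flight ioends against an inode can do:
 *
 *	xfs_ioend_wait(ip);	(sleeps until i_iocount reaches 0)
 */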
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}
/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	xfs_ioend_wake(ip);
	mempool_free(ioend, xfs_ioend_pool);
}
/*
 * If the end of the current ioend is beyond the current EOF,
 * return the new EOF value, otherwise zero.
 */
STATIC xfs_fsize_t
xfs_ioend_new_eof(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	bsize = ioend->io_offset + ioend->io_size;
	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);
	return isize > ip->i_d.di_size ? isize : 0;
}
/*
 * Update on-disk file size now that data has been written to disk.  The
 * current in-memory file size is i_size.  If a write is beyond eof i_new_size
 * will be the intended file size until i_size is updated.  If this write does
 * not extend all the way to the valid file size then restrict this update to
 * the end of the write.
 *
 * This function does not block as blocking on the inode lock in IO completion
 * can lead to IO completion order dependency deadlocks.  If it can't get the
 * inode ilock it will return EAGAIN.  Callers must handle this.
 */
STATIC int
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;

	if (unlikely(ioend->io_error))
		return 0;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return EAGAIN;

	isize = xfs_ioend_new_eof(ioend);
	if (isize) {
		trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
		ip->i_d.di_size = isize;
		xfs_mark_inode_dirty(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}
/*
 * Schedule IO completion handling on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		if (ioend->io_type == IO_UNWRITTEN)
			queue_work(xfsconvertd_workqueue, &ioend->io_work);
		else
			queue_work(xfsdatad_workqueue, &ioend->io_work);
	}
}
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IO_UNWRITTEN &&
	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {

		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						 ioend->io_size);
		if (error)
			ioend->io_error = error;
	}

	/*
	 * We might have to update the on-disk file size after extending
	 * writes.
	 */
	error = xfs_setfilesize(ioend);
	ASSERT(!error || error == EAGAIN);

	/*
	 * If we didn't complete processing of the ioend, requeue it to the
	 * tail of the workqueue for another attempt later. Otherwise destroy
	 * it.
	 */
	if (error == EAGAIN) {
		atomic_inc(&ioend->io_remaining);
		xfs_finish_ioend(ioend);
		/* ensure we don't spin on blocked ioends */
		delay(1);
	} else {
		if (ioend->io_iocb)
			aio_complete(ioend->io_iocb, ioend->io_result, 0);
		xfs_destroy_ioend(ioend);
	}
}
/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend_sync(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
}
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially.  This will prevent the I/O
	 * completion callback from running before we have finished
	 * submitting all the I/O for this ioend.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_iocb = NULL;
	ioend->io_result = 0;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}
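/*
 * Reference-count lifecycle sketch for io_remaining (illustrative, not
 * part of the original source).  The initial count of 1 is the
 * submitter's reference; each bio takes its own reference in
 * xfs_submit_ioend_bio() and drops it from xfs_end_bio():
 *
 *	ioend = xfs_alloc_ioend(inode, type);	io_remaining == 1
 *	xfs_submit_ioend_bio(wbc, ioend, bio);	io_remaining == 2
 *	...bio completes, xfs_end_bio()...	io_remaining == 1
 *	xfs_finish_ioend(ioend);		final put, queues io_work
 */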
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (type == IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_maxioffset);

	if (offset + count > mp->m_maxioffset)
		count = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
			  bmapi_flags, NULL, 0, imap, &nimaps, NULL);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return -XFS_ERROR(error);

	if (type == IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	}

#ifdef DEBUG
	if (type == IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}
STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}
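/*
 * Worked example (illustrative): with 4k blocks (i_blkbits == 12), a
 * byte offset of 8192 shifts down to file block 2, so an imap with
 * br_startoff == 0 and br_blockcount == 4 covers it (0 <= 2 < 4),
 * while an imap starting at file block 4 does not.
 */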
/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	/*
	 * If the I/O is beyond EOF we mark the inode dirty immediately
	 * but don't update the inode size until I/O completion.
	 */
	if (xfs_ioend_new_eof(ioend))
		xfs_mark_inode_dirty(XFS_I(ioend->io_inode));

	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we get them, we can end up with a page that only has some
 * buffers marked async write, and I/O completion on those can occur before we
 * mark the remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
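/*
 * Illustrative interleaving (a sketch, not from the original source)
 * for a page whose buffers bh1 and bh2 belong to different ioends:
 *
 *	single pass:			two passes:
 *	  start bh1, submit bh1		  start bh1, start bh2
 *	  bh1 I/O completes; bh2 is	  submit bh1, submit bh2
 *	  not async write yet, so	  page writeback is only ended
 *	  the page writeback is		  once both buffers complete
 *	  ended prematurely
 *	  start bh2, submit bh2
 */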
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}
/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		xfs_ioend_wake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}
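/*
 * Resulting structure (illustrative): ioends are chained through
 * io_list, and the buffers inside each ioend are chained through
 * b_private:
 *
 *	*result --> ioend A --io_list--> ioend B --io_list--> NULL
 *	              |                    |
 *	            bh1 -> bh2 -> NULL   bh3 -> NULL     (via b_private)
 */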
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}
/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IO_DELALLOC);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IO_OVERWRITE);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}
/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
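/*
 * Worked example (illustrative): with 4k pages and 1k blocks, a file of
 * 10240 bytes ends 2048 bytes into page index 2.  On that last page
 * end_offset & (PAGE_CACHE_SIZE - 1) == 2048, p_offset rounds up to
 * 2048 and page_dirty == 2048 / 1024 == 2 buffers.  On any earlier
 * page the mask yields 0, p_offset becomes PAGE_CACHE_SIZE and
 * page_dirty == 4, i.e. every buffer on the page.
 */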
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = IO_DELALLOC;
			else
				type = IO_OVERWRITE;

			if (!xfs_imap_valid(inode, imap, offset)) {
				done = 1;
				continue;
			}

			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by imap and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
						imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
	}
}
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}
/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it.  Because they are delalloc, we can do this without needing a
 * transaction.  Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_is_delayed_page(page, IO_DELALLOC))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something went wrong, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0);
	return;
}
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should really be done by the core VM, but until that happens
	 * filesystems like XFS, btrfs and ext4 have to take care of this
	 * by themselves.
	 */
	if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			unlock_page(page);
			return 0;
		}
	}
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != IO_UNWRITTEN) {
				type = IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != IO_DELALLOC) {
				type = IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != IO_OVERWRITE) {
				type = IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page)) {
				ASSERT(buffer_mapped(bh));
				imap_valid = 0;
			}
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	if (ioend && imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	if (iohead)
		xfs_submit_ioend(wbc, iohead);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released.  The page should already be clean.  We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON(delalloc))
		return 0;
	if (WARN_ON(unwritten))
		return 0;

	return try_to_free_buffers(page);
}
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	if (create) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
	} else {
		lockmode = xfs_ilock_map_shared(ip);
	}

	ASSERT(offset <= mp->m_maxioffset);
	if (offset + size > mp->m_maxioffset)
		size = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
			  XFS_BMAPI_ENTIRE, NULL, 0, &imap, &nimaps, NULL);
	if (error)
		goto out_unlock;

	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK))) {
		if (direct) {
			error = xfs_iomap_write_direct(ip, offset, size,
						       &imap, nimaps);
		} else {
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
		}
		if (error)
			goto out_unlock;

		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}
	xfs_iunlock(ip, lockmode);

	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/*
	 * If this is O_DIRECT or the mpage code calling, tell them how large
	 * the mapping is, so that we can avoid repeated get_blocks calls.
	 */
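/*
 * Worked example (illustrative): with 4k blocks, an extent mapping file
 * blocks [10, 18) probed from iblock 12 gives mapping_size =
 * (10 + 8 - 12) = 6 blocks = 24576 bytes of remaining contiguous
 * mapping, which is then clamped to the caller's requested size.
 */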
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t		mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;

		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		bh_result->b_size = mapping_size;
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return -error;
}
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}
/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL __xfs_get_blocks signals us that we
 * need to issue a transaction to convert the range from unwritten to written
 * extents.  In case this is regular synchronous I/O we just call xfs_end_io
 * to do this and we are done.  But in case this was a successful AIO
 * request this handler is called from interrupt context, from which we
 * can't start transactions.  In that case offload the I/O completion to
 * the workqueues we also use for buffered I/O completion.
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private,
	int			ret,
	bool			is_async)
{
	struct xfs_ioend	*ioend = iocb->private;
	struct inode		*inode = ioend->io_inode;

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;

	ioend->io_offset = offset;
	ioend->io_size = size;
	if (private && size > 0)
		ioend->io_type = IO_UNWRITTEN;

	if (is_async) {
		/*
		 * If we are converting an unwritten extent we need to delay
		 * the AIO completion until after the unwritten extent
		 * conversion has completed, otherwise do it ASAP.
		 */
		if (ioend->io_type == IO_UNWRITTEN) {
			ioend->io_iocb = iocb;
			ioend->io_result = ret;
		} else {
			aio_complete(iocb, ret, 0);
		}
		xfs_finish_ioend(ioend);
	} else {
		xfs_finish_ioend_sync(ioend);
	}

	/* XXX: probably should move into the real I/O completion handler */
	inode_dio_done(inode);
}
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	ssize_t			ret;

	if (rw & WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IO_DIRECT);

		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					    offset, nr_segs,
					    xfs_get_blocks_direct,
					    xfs_end_io_direct_write, NULL, 0);
		if (ret != -EIOCBQUEUED && iocb->private)
			xfs_destroy_ioend(iocb->private);
	} else {
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					    offset, nr_segs,
					    xfs_get_blocks_direct,
					    NULL, NULL, 0);
	}

	return ret;
}
STATIC void
xfs_vm_write_failed(
	struct address_space	*mapping,
	loff_t			to)
{
	struct inode		*inode = mapping->host;

	if (to > inode->i_size) {
		/*
		 * Punch out the delalloc blocks we have already allocated.  We
		 * don't call xfs_setattr() to do this as we may be in the
		 * middle of a multi-iovec write and so the vfs inode->i_size
		 * will not match the xfs ip->i_size and so it will zero too
		 * much.  Hence we just truncate the page cache to zero what is
		 * necessary and punch the delalloc blocks directly.
		 */
		struct xfs_inode	*ip = XFS_I(inode);
		xfs_fileoff_t		start_fsb;
		xfs_fileoff_t		end_fsb;
		int			error;

		truncate_pagecache(inode, to, inode->i_size);

		/*
		 * Check if there are any blocks that are outside of i_size
		 * that need to be trimmed back.
		 */
		start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1;
		end_fsb = XFS_B_TO_FSB(ip->i_mount, to);
		if (end_fsb <= start_fsb)
			return;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
							end_fsb - start_fsb);
		if (error) {
			/* something went wrong, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"xfs_vm_write_failed: unable to clean up ino %lld",
						ip->i_ino);
			}
		}
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
}
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	int			ret;

	ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS,
				pagep, xfs_get_blocks);
	if (unlikely(ret))
		xfs_vm_write_failed(mapping, pos + len);
	return ret;
}

STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len))
		xfs_vm_write_failed(mapping, pos + len);
	return ret;
}
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};