* guaranteed). After mapping we submit all mapped pages for IO.
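+ *
+ * On fatal errors *give_up_on_write is set so that the caller invalidates
+ * the remaining dirty pages instead of leaving them for a later retry.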
*/
static int mpage_map_and_submit_extent(handle_t *handle,
- struct mpage_da_data *mpd)
+ struct mpage_da_data *mpd,
+ bool *give_up_on_write)
{
struct inode *inode = mpd->inode;
struct ext4_map_blocks *map = &mpd->map;
if (err < 0) {
struct super_block *sb = inode->i_sb;
+ if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) {
+ /* invalidate all the pages */
+ *give_up_on_write = true;
+ return err;
+ }
/*
- * Need to commit transaction to free blocks. Let upper
- * layers sort it out.
+ * Let the upper layers retry transient errors. In the case of
+ * ENOSPC, if ext4_count_free_clusters() is non-zero, a commit
+ * should free up blocks.
*/
- if (err == -ENOSPC && ext4_count_free_clusters(sb))
- return -ENOSPC;
-
- if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
- ext4_msg(sb, KERN_CRIT,
- "Delayed block allocation failed for "
- "inode %lu at logical offset %llu with"
- " max blocks %u with error %d",
- inode->i_ino,
- (unsigned long long)map->m_lblk,
- (unsigned)map->m_len, err);
- ext4_msg(sb, KERN_CRIT,
- "This should not happen!! Data will "
- "be lost\n");
- if (err == -ENOSPC)
- ext4_print_free_blocks(inode);
- }
- /* invalidate all the pages */
- mpage_release_unused_pages(mpd, true);
+ if ((err != -ENOSPC) || ext4_count_free_clusters(sb))
+ return err;
+ ext4_msg(sb, KERN_CRIT,
+ "Delayed block allocation failed for inode %lu at "
+ "logical offset %llu with max blocks %u",
+ inode->i_ino, (unsigned long long)map->m_lblk,
+ (unsigned)map->m_len);
+ ext4_msg(sb, KERN_CRIT,
+ "This should not happen!! Data will be lost\n");
+ ext4_print_free_blocks(inode);
+ *give_up_on_write = true; /* invalidate all the pages */
return err;
}
/*
struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
bool done;
struct blk_plug plug;
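+ /* set on fatal errors so that unwritten dirty pages get invalidated */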
+ bool give_up_on_write = false;
trace_ext4_writepages(inode, wbc);
ret = mpage_prepare_extent_to_map(&mpd);
if (!ret) {
if (mpd.map.m_len)
- ret = mpage_map_and_submit_extent(handle, &mpd);
+ ret = mpage_map_and_submit_extent(handle, &mpd,
+ &give_up_on_write);
else {
/*
* We scanned the whole range (or exhausted
/* Submit prepared bio */
ext4_io_submit(&mpd.io_submit);
/* Unlock pages we didn't use */
- mpage_release_unused_pages(&mpd, false);
+ mpage_release_unused_pages(&mpd, give_up_on_write);
/* Drop our io_end reference we got from init */
ext4_put_io_end(mpd.io_submit.io_end);