Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
[karo-tx-linux.git] / fs / ext4 / inode.c
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 25f809dc45a367a8d9ceaf3c4fab2579fffd2fdb..6324f74e03424e9ac21ceb01f6fbee07caa546df 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -233,6 +233,11 @@ void ext4_evict_inode(struct inode *inode)
        if (is_bad_inode(inode))
                goto no_delete;
 
+       /*
+        * Protect us against freezing - iput() caller didn't have to have any
+        * protection against it
+        */
+       sb_start_intwrite(inode->i_sb);
        handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
        if (IS_ERR(handle)) {
                ext4_std_error(inode->i_sb, PTR_ERR(handle));
@@ -242,6 +247,7 @@ void ext4_evict_inode(struct inode *inode)
                 * cleaned up.
                 */
                ext4_orphan_del(NULL, inode);
+               sb_end_intwrite(inode->i_sb);
                goto no_delete;
        }
 
@@ -273,6 +279,7 @@ void ext4_evict_inode(struct inode *inode)
                stop_handle:
                        ext4_journal_stop(handle);
                        ext4_orphan_del(NULL, inode);
+                       sb_end_intwrite(inode->i_sb);
                        goto no_delete;
                }
        }
@@ -301,6 +308,7 @@ void ext4_evict_inode(struct inode *inode)
        else
                ext4_free_inode(handle, inode);
        ext4_journal_stop(handle);
+       sb_end_intwrite(inode->i_sb);
        return;
 no_delete:
        ext4_clear_inode(inode);        /* We must guarantee clearing of inode... */
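
[Note: the four hunks above add freeze protection around the journalled work in ext4_evict_inode(). The rule they enforce is that once sb_start_intwrite() has been taken, every exit path, error or success, must call sb_end_intwrite() before returning. A minimal illustrative sketch of that pairing follows; it is not part of the patch and the caller name is made up:]

/* Illustrative only: example_intwrite_user() is hypothetical. */
static void example_intwrite_user(struct inode *inode)
{
	handle_t *handle;

	/* Get freeze protection: the fs cannot be frozen under this internal write. */
	sb_start_intwrite(inode->i_sb);

	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle)) {
		/* Error paths must drop the freeze protection too. */
		sb_end_intwrite(inode->i_sb);
		return;
	}

	/* ... journalled metadata updates ... */

	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
}
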
@@ -1182,6 +1190,17 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned int md_needed;
        int ret;
+       ext4_lblk_t save_last_lblock;
+       int save_len;
+
+       /*
+        * We will charge metadata quota at writeout time; this saves
+        * us from metadata over-estimation, though we may go over by
+        * a small amount in the end.  Here we just reserve for data.
+        */
+       ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
+       if (ret)
+               return ret;
 
        /*
         * recalculate the amount of metadata blocks to reserve
@@ -1190,32 +1209,31 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
         */
 repeat:
        spin_lock(&ei->i_block_reservation_lock);
+       /*
+        * ext4_calc_metadata_amount() has side effects, which we have
+        * to be prepared to undo if we fail to claim space.
+        */
+       save_len = ei->i_da_metadata_calc_len;
+       save_last_lblock = ei->i_da_metadata_calc_last_lblock;
        md_needed = EXT4_NUM_B2C(sbi,
                                 ext4_calc_metadata_amount(inode, lblock));
        trace_ext4_da_reserve_space(inode, md_needed);
-       spin_unlock(&ei->i_block_reservation_lock);
 
-       /*
-        * We will charge metadata quota at writeout time; this saves
-        * us from metadata over-estimation, though we may go over by
-        * a small amount in the end.  Here we just reserve for data.
-        */
-       ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
-       if (ret)
-               return ret;
        /*
         * We do still charge estimated metadata to the sb though;
         * we cannot afford to run out of free blocks.
         */
        if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
-               dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
+               ei->i_da_metadata_calc_len = save_len;
+               ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+               spin_unlock(&ei->i_block_reservation_lock);
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                        yield();
                        goto repeat;
                }
+               dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
                return -ENOSPC;
        }
-       spin_lock(&ei->i_block_reservation_lock);
        ei->i_reserved_data_blocks++;
        ei->i_reserved_meta_blocks += md_needed;
        spin_unlock(&ei->i_block_reservation_lock);
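
[Note: taken together, the ext4_da_reserve_space() hunks above reorder the reservation path: the data-block quota is reserved before the spinlock is taken, the metadata estimate and cluster claim happen under i_block_reservation_lock with the estimator state snapshotted, and a failed claim restores that snapshot, retries, and only once retries are exhausted releases the quota reservation. A condensed, illustrative reading of the resulting flow follows; it is not part of the patch, and the final return value is assumed from the surrounding function:]

	/* Reserve quota for the data block before taking the spinlock. */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	/* Snapshot the estimator state so a failed claim can undo it. */
	save_len = ei->i_da_metadata_calc_len;
	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
	md_needed = EXT4_NUM_B2C(sbi,
				 ext4_calc_metadata_amount(inode, lblock));
	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
		ei->i_da_metadata_calc_len = save_len;
		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
		spin_unlock(&ei->i_block_reservation_lock);
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			yield();
			goto repeat;
		}
		/* Out of space for good: give the quota reservation back. */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	/* The claim succeeded: account the data block and estimated metadata. */
	ei->i_reserved_data_blocks++;
	ei->i_reserved_meta_blocks += md_needed;
	spin_unlock(&ei->i_block_reservation_lock);
	return 0;
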
@@ -4769,11 +4787,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        get_block_t *get_block;
        int retries = 0;
 
-       /*
-        * This check is racy but catches the common case. We rely on
-        * __block_page_mkwrite() to do a reliable check.
-        */
-       vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+       sb_start_pagefault(inode->i_sb);
        /* Delalloc case is easy... */
        if (test_opt(inode->i_sb, DELALLOC) &&
            !ext4_should_journal_data(inode) &&
@@ -4841,5 +4855,6 @@ retry_alloc:
 out_ret:
        ret = block_page_mkwrite_return(ret);
 out:
+       sb_end_pagefault(inode->i_sb);
        return ret;
 }
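
[Note: the last two hunks replace the racy vfs_check_frozen() check in ext4_page_mkwrite() with a freeze-protection bracket held for the whole fault. A minimal sketch of that bracket in a ->page_mkwrite handler follows; it is not part of the patch, the handler name is hypothetical, and ext4's delalloc/journalling details are trimmed:]

/* Illustrative only: a bare-bones ->page_mkwrite handler using the bracket. */
static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_mapping->host;
	int ret;

	/* Get pagefault-level freeze protection for the duration of the fault. */
	sb_start_pagefault(inode->i_sb);

	ret = __block_page_mkwrite(vma, vmf, ext4_get_block);
	ret = block_page_mkwrite_return(ret);

	/* Drop the protection on every return path. */
	sb_end_pagefault(inode->i_sb);
	return ret;
}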