mm: prevent concurrent unmap_mapping_range() on the same inode
author Miklos Szeredi <mszeredi@suse.cz>
Wed, 23 Feb 2011 12:49:47 +0000 (13:49 +0100)
committer Greg Kroah-Hartman <gregkh@suse.de>
Wed, 13 Jul 2011 03:31:27 +0000 (05:31 +0200)
commit 2aa15890f3c191326678f1bd68af61ec6b8753ec upstream.

Michael Leun reported that running parallel opens on a fuse filesystem
can trigger a "kernel BUG at mm/truncate.c:475".

Gurudas Pai reported the same bug on NFS.

The reason is that unmap_mapping_range() is not prepared for more than
one concurrent invocation per inode.  For example:

  thread1: going through a big range, stops in the middle of a vma and
     stores the restart address in vm_truncate_count.

  thread2: comes in with a small (e.g. single page) unmap request on
     the same vma, somewhere before restart_address, finds that the
     vma was already unmapped up to the restart address and happily
     returns without doing anything.

Another scenario would be two big unmap requests, both having to
restart the unmapping and each one setting vm_truncate_count to its
own value.  This could go on forever without either of them being
able to finish.
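
For concreteness, here is a minimal user-space sketch of the first
scenario.  It is an analogue of the flawed restart logic, not the
kernel code: unmap_range() and the restart cursor are hypothetical
stand-ins for the vma walk and vm_truncate_count.

  /* Build with: cc -pthread sketch.c.  The outcome is
   * timing-dependent, as races are. */
  #include <pthread.h>
  #include <stdio.h>
  #include <unistd.h>

  #define NPAGES 1024

  static int mapped[NPAGES];  /* 1 = page is mapped into the vma */
  static int restart;         /* models vm_truncate_count */

  static void unmap_range(int start, int end)
  {
      if (end <= restart)     /* flawed: "already unmapped up to here" */
          return;
      for (int i = start; i < end; i++) {
          mapped[i] = 0;
          restart = i + 1;    /* publish progress, then reschedule, */
          if ((i & 255) == 255)
              usleep(100);    /* as the kernel does when dropping locks */
      }
  }

  static void *big_unmap(void *arg)
  {
      unmap_range(0, NPAGES);      /* thread1: the big range */
      return arg;
  }

  static void *small_unmap(void *arg)
  {
      mapped[10] = 1;              /* page was faulted back in meanwhile */
      unmap_range(10, 11);         /* thread2: cursor is past 11, skips */
      return arg;
  }

  int main(void)
  {
      pthread_t t1, t2;

      for (int i = 0; i < NPAGES; i++)
          mapped[i] = 1;
      pthread_create(&t1, NULL, big_unmap, NULL);
      usleep(1000);                /* give the big walk a head start */
      pthread_create(&t2, NULL, small_unmap, NULL);
      pthread_join(t1, NULL);
      pthread_join(t2, NULL);
      if (mapped[10])
          printf("page 10 still mapped after unmap_range(10, 11)\n");
      return 0;
  }

The small unmap returns believing its page is gone; the subsequent
consistency check (the BUG in mm/truncate.c) then finds it still mapped.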

Truncate and hole punching already serialize with i_mutex.  Other
callers of unmap_mapping_range() do not, and it's difficult to get
i_mutex protection for all callers.  In particular ->d_revalidate(),
which calls invalidate_inode_pages2_range() in fuse, may be called
with or without i_mutex.

This patch adds a new mutex to 'struct address_space' to prevent
multiple unmap_mapping_range() invocations from running concurrently
on the same mapping.
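
Condensed from the mm/memory.c hunk below, the resulting locking
order in unmap_mapping_range() becomes:

  mutex_lock(&mapping->unmap_mutex);   /* new: one unmapper per mapping */
  spin_lock(&mapping->i_mmap_lock);
  ... unmap vmas on i_mmap and i_mmap_nonlinear ...
  spin_unlock(&mapping->i_mmap_lock);
  mutex_unlock(&mapping->unmap_mutex);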

[ We'll hopefully get rid of all this with the upcoming mm
  preemptibility series by Peter Zijlstra, the "mm: Remove i_mmap_mutex
  lockbreak" patch in particular.  But that is for 2.6.39 ]

Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Reported-by: Michael Leun <lkml20101129@newton.leun.net>
Reported-by: Gurudas Pai <gurudas.pai@oracle.com>
Tested-by: Gurudas Pai <gurudas.pai@oracle.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
fs/inode.c
fs/nilfs2/btnode.c
fs/nilfs2/btnode.h
fs/nilfs2/super.c
include/linux/fs.h
mm/memory.c

diff --git a/fs/inode.c b/fs/inode.c
index 03dfeb2e39287a75b2fccbe6c71d1451ebade3a0..4405e8804599a5c28ebc5fada60411e7e49cf093 100644
@@ -246,6 +246,20 @@ void destroy_inode(struct inode *inode)
                kmem_cache_free(inode_cachep, (inode));
 }
 
+void address_space_init_once(struct address_space *mapping)
+{
+       memset(mapping, 0, sizeof(*mapping));
+       INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+       spin_lock_init(&mapping->tree_lock);
+       spin_lock_init(&mapping->i_mmap_lock);
+       INIT_LIST_HEAD(&mapping->private_list);
+       spin_lock_init(&mapping->private_lock);
+       INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+       INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+       mutex_init(&mapping->unmap_mutex);
+}
+EXPORT_SYMBOL(address_space_init_once);
+
 /*
  * These are initializations that only need to be done
  * once, because the fields are idempotent across use
@@ -257,13 +271,7 @@ void inode_init_once(struct inode *inode)
        INIT_HLIST_NODE(&inode->i_hash);
        INIT_LIST_HEAD(&inode->i_dentry);
        INIT_LIST_HEAD(&inode->i_devices);
-       INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
-       spin_lock_init(&inode->i_data.tree_lock);
-       spin_lock_init(&inode->i_data.i_mmap_lock);
-       INIT_LIST_HEAD(&inode->i_data.private_list);
-       spin_lock_init(&inode->i_data.private_lock);
-       INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
-       INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
+       address_space_init_once(&inode->i_data);
        i_size_ordered_init(inode);
 #ifdef CONFIG_INOTIFY
        INIT_LIST_HEAD(&inode->inotify_watches);
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 471e269536ae1f24e07d84c9d412abcd5068d17d..3c18687e8c74f5a3b76344193720419d6863f71d 100644
 #include "btnode.h"
 
 
-void nilfs_btnode_cache_init_once(struct address_space *btnc)
-{
-       memset(btnc, 0, sizeof(*btnc));
-       INIT_RADIX_TREE(&btnc->page_tree, GFP_ATOMIC);
-       spin_lock_init(&btnc->tree_lock);
-       INIT_LIST_HEAD(&btnc->private_list);
-       spin_lock_init(&btnc->private_lock);
-
-       spin_lock_init(&btnc->i_mmap_lock);
-       INIT_RAW_PRIO_TREE_ROOT(&btnc->i_mmap);
-       INIT_LIST_HEAD(&btnc->i_mmap_nonlinear);
-}
-
 static const struct address_space_operations def_btnode_aops = {
        .sync_page              = block_sync_page,
 };
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 07da83f07712a9caa7afd6a673bc070783d20795..fa2f1e68f4d17f3d720ecb0a0b5818915712ddc0 100644
@@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt {
        struct buffer_head *newbh;
 };
 
-void nilfs_btnode_cache_init_once(struct address_space *);
 void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
 void nilfs_btnode_cache_clear(struct address_space *);
 struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 4d3ddcca305a0cf7688f905ed2f830cfa4db76d9..84dffd85b4c69dc0fd9525fe5019fbf20ce3e32b 100644
@@ -166,7 +166,7 @@ static void init_once(void *obj)
 #ifdef CONFIG_NILFS_XATTR
        init_rwsem(&ii->xattr_sem);
 #endif
-       nilfs_btnode_cache_init_once(&ii->i_btnode_cache);
+       address_space_init_once(&ii->i_btnode_cache);
        ii->i_bmap = (struct nilfs_bmap *)&ii->i_bmap_union;
        inode_init_once(&ii->vfs_inode);
 }
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 899a4d6985d49a183f9a1e7eaf0613f5adef29aa..d6b2ab57e54bd4b9deca6630c5386691de4ad689 100644
@@ -637,6 +637,7 @@ struct address_space {
        spinlock_t              private_lock;   /* for use by the address_space */
        struct list_head        private_list;   /* ditto */
        struct address_space    *assoc_mapping; /* ditto */
+       struct mutex            unmap_mutex;    /* to protect unmapping */
 } __attribute__((aligned(sizeof(long))));
        /*
         * On most architectures that alignment is already the case; but
@@ -2148,6 +2149,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
 
 extern int inode_init_always(struct super_block *, struct inode *);
 extern void inode_init_once(struct inode *);
+extern void address_space_init_once(struct address_space *mapping);
 extern void inode_add_to_lists(struct super_block *, struct inode *);
 extern void iput(struct inode *);
 extern struct inode * igrab(struct inode *);
diff --git a/mm/memory.c b/mm/memory.c
index e298c6f499bc2d0b1bb05d34b8ec2c0373931825..5df9a03087f529a1012610e2e44df4f2bef153d9 100644
@@ -2459,6 +2459,7 @@ void unmap_mapping_range(struct address_space *mapping,
                details.last_index = ULONG_MAX;
        details.i_mmap_lock = &mapping->i_mmap_lock;
 
+       mutex_lock(&mapping->unmap_mutex);
        spin_lock(&mapping->i_mmap_lock);
 
        /* Protect against endless unmapping loops */
@@ -2475,6 +2476,7 @@ void unmap_mapping_range(struct address_space *mapping,
        if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
                unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
        spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->unmap_mutex);
 }
 EXPORT_SYMBOL(unmap_mapping_range);