include/linux/fs.h: disable preempt when acquire i_size_seqcount write lock
author Fan Du <fan.du@windriver.com>
Wed, 20 Mar 2013 04:07:32 +0000 (15:07 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 20 Mar 2013 04:22:54 +0000 (15:22 +1100)
Two rt tasks are bound to one CPU core.

A lower priority rt task A takes the i_size_seqcount write lock, and is then
preempted by a higher priority rt task B which tries to acquire the read seq
lock. Since the preempted writer can never run again on that CPU, the reader
spins forever and the task is doomed to lock up.

rt task A with lower priority: call write            rt task B with higher priority: call sync, and preempt task A
  i_size_write                                         i_size_read
    write_seqcount_begin(&inode->i_size_seqcount);       read_seqcount_begin <-- lockup here...
    inode->i_size = i_size;
    write_seqcount_end(&inode->i_size_seqcount);
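
The "lockup here" spin comes from the reader side of the seqcount. Roughly,
the core of read_seqcount_begin() looks like this (a sketch modelled on
__read_seqcount_begin() in include/linux/seqlock.h of kernels around this
version, with the memory barrier omitted):

static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		/* odd sequence: a writer is inside its critical section */
		cpu_relax();
		goto repeat;
	}
	return ret;
}

Because task A was preempted between write_seqcount_begin() and
write_seqcount_end(), the sequence count stays odd, so the higher priority
reader on the same CPU spins here forever.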

So disabling preemption across the i_size_seqcount write-side critical
section cures the problem.
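
With preemption disabled on the write side, the writer always reaches
write_seqcount_end() before a reader on the same CPU can run, so the 32-bit
SMP reader loop makes progress. For reference, that loop is sketched below
(based on the CONFIG_SMP branch of i_size_read() in the same fs.h; the
remaining branches are collapsed here):

static inline loff_t i_size_read(const struct inode *inode)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	loff_t i_size;
	unsigned int seq;

	do {
		/* read_seqcount_begin() waits for an even sequence count */
		seq = read_seqcount_begin(&inode->i_size_seqcount);
		i_size = inode->i_size;
		/* retry if a writer changed the count during the read */
	} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
	return i_size;
#else
	/* 32-bit CONFIG_PREEMPT and 64-bit cases omitted from this sketch */
	return inode->i_size;
#endif
}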

Signed-off-by: Fan Du <fan.du@windriver.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/fs.h

index 0dc32bbb8b6ca0da390e02ba9cbf471c8cc13210..4c4f0e48831394a05eab2824cf76803b71975098 100644
@@ -675,9 +675,11 @@ static inline loff_t i_size_read(const struct inode *inode)
 static inline void i_size_write(struct inode *inode, loff_t i_size)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+       preempt_disable();
        write_seqcount_begin(&inode->i_size_seqcount);
        inode->i_size = i_size;
        write_seqcount_end(&inode->i_size_seqcount);
+       preempt_enable();
 #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
        preempt_disable();
        inode->i_size = i_size;