From: Tejun Heo
Date: Sat, 11 Feb 2012 11:37:25 +0000 (+0100)
Subject: block: fix lockdep warning on io_context release put_io_context()
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=d8c66c5d59247e25a69428aced0b79d33b9c66d6;p=linux-beck.git

block: fix lockdep warning on io_context release put_io_context()

11a3122f6c "block: strip out locking optimization in put_io_context()"
removed the ioc_lock depth lockdep annotation along with the locking
optimization; however, while recursing from put_io_context() is no
longer possible, ioc_release_fn() may still end up putting the last
reference of another ioc through the elevator, which will grab
ioc->lock, triggering a spurious (as the ioc is always a different one)
A-A deadlock warning.

As this can only happen once from ioc_release_fn(), using a non-zero
subclass from ioc_release_fn() is enough.  Use subclass 1.

Signed-off-by: Tejun Heo
Signed-off-by: Jens Axboe
---

diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 9884fd7427fe..8b782a63c297 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -80,8 +80,15 @@ static void ioc_release_fn(struct work_struct *work)
 	struct io_context *ioc = container_of(work, struct io_context,
 					      release_work);
 	struct request_queue *last_q = NULL;
+	unsigned long flags;
 
-	spin_lock_irq(&ioc->lock);
+	/*
+	 * Exiting icq may call into put_io_context() through elevator
+	 * which will trigger lockdep warning.  The ioc's are guaranteed to
+	 * be different, use a different locking subclass here.  Use
+	 * irqsave variant as there's no spin_lock_irq_nested().
+	 */
+	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 
 	while (!hlist_empty(&ioc->icq_list)) {
 		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
@@ -103,15 +110,15 @@ static void ioc_release_fn(struct work_struct *work)
 		 */
 		if (last_q) {
 			spin_unlock(last_q->queue_lock);
-			spin_unlock_irq(&ioc->lock);
+			spin_unlock_irqrestore(&ioc->lock, flags);
 			blk_put_queue(last_q);
 		} else {
-			spin_unlock_irq(&ioc->lock);
+			spin_unlock_irqrestore(&ioc->lock, flags);
 		}
 
 		last_q = this_q;
-		spin_lock_irq(this_q->queue_lock);
-		spin_lock(&ioc->lock);
+		spin_lock_irqsave(this_q->queue_lock, flags);
+		spin_lock_nested(&ioc->lock, 1);
 		continue;
 	}
 	ioc_exit_icq(icq);
@@ -119,10 +126,10 @@ static void ioc_release_fn(struct work_struct *work)
 
 	if (last_q) {
 		spin_unlock(last_q->queue_lock);
-		spin_unlock_irq(&ioc->lock);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 		blk_put_queue(last_q);
 	} else {
-		spin_unlock_irq(&ioc->lock);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 	}
 
 	kmem_cache_free(iocontext_cachep, ioc);
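
As background for the subclass trick the patch relies on, here is a minimal
standalone sketch (not part of the patch; struct foo, its member names and
foo_reparent() are made up) of how a non-zero lockdep subclass suppresses the
false A-A report when two different instances of the same lock class are
nested:

	#include <linux/spinlock.h>

	/* hypothetical object type; all instances share one lockdep class */
	struct foo {
		spinlock_t	lock;
	};

	static void foo_reparent(struct foo *parent, struct foo *child)
	{
		unsigned long flags;

		spin_lock_irqsave(&parent->lock, flags);

		/*
		 * child->lock belongs to the same lock class as parent->lock.
		 * Without a subclass annotation lockdep would flag this as a
		 * recursive (A-A) acquisition even though the two instances
		 * are guaranteed to be different objects.  Subclass 1 marks
		 * it as an intentional one-level nesting.
		 */
		spin_lock_nested(&child->lock, 1);

		/* ... move state from parent to child ... */

		spin_unlock(&child->lock);
		spin_unlock_irqrestore(&parent->lock, flags);
	}

ioc_release_fn() achieves the same effect with spin_lock_irqsave_nested()
because, as the patch comment notes, there is no spin_lock_irq_nested()
variant.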