From: Thomas Graf
Date: Mon, 12 Jan 2015 23:58:21 +0000 (+0000)
Subject: rhashtable: Lower/upper bucket may map to same lock while shrinking
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=80ca8c3a84c74a87977558861bb8eef650732912;p=linux-beck.git

rhashtable: Lower/upper bucket may map to same lock while shrinking

Each per-bucket lock covers a configurable number of buckets. While
shrinking, two buckets in the old table contain the entries for a
single bucket in the new table, and both must be locked while their
chains are relinked. Check whether the two old buckets are protected
by distinct locks before taking the second one, to avoid acquiring
the same lock recursively.

Fixes: 97defe1e ("rhashtable: Per bucket locks & deferred expansion/shrinking")
Reported-by: Fengguang Wu
Signed-off-by: Thomas Graf
Signed-off-by: David S. Miller
---

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index ed6ae1ad304c..aca699813ba9 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -443,8 +443,16 @@ int rhashtable_shrink(struct rhashtable *ht)
 		new_bucket_lock = bucket_lock(new_tbl, new_hash);
 
 		spin_lock_bh(old_bucket_lock1);
-		spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
-		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
+
+		/* Depending on how buckets map to locks, the buckets in
+		 * the lower and upper regions may map to the same lock.
+		 */
+		if (old_bucket_lock1 != old_bucket_lock2) {
+			spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
+			spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
+		} else {
+			spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
+		}
 
 		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
 				   tbl->buckets[new_hash]);
@@ -452,7 +460,8 @@ int rhashtable_shrink(struct rhashtable *ht)
 				   tbl->buckets[new_hash + new_tbl->size]);
 
 		spin_unlock_bh(new_bucket_lock);
-		spin_unlock_bh(old_bucket_lock2);
+		if (old_bucket_lock1 != old_bucket_lock2)
+			spin_unlock_bh(old_bucket_lock2);
 		spin_unlock_bh(old_bucket_lock1);
 	}
 
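
To see why the lower and upper buckets can share a lock: bucket_lock()
at the time of this commit returned &tbl->locks[hash & tbl->locks_mask],
i.e. the hash masked down to a configurable number of locks. The
user-space sketch below is illustrative only (lock_index(), new_size
and locks_mask are made-up names, not the kernel code); it shows that
whenever the lock count does not exceed the shrunken table size, the
old buckets new_hash and new_hash + new_tbl->size always select the
same lock:

#include <stdio.h>

/* Stand-in for the kernel's per-bucket lock lookup: the lock for a
 * bucket is locks[hash & locks_mask], so several buckets share one
 * lock whenever there are fewer locks than buckets.
 */
static unsigned int lock_index(unsigned int hash, unsigned int locks_mask)
{
	return hash & locks_mask;
}

int main(void)
{
	unsigned int new_size = 4;       /* shrunken table: 4 buckets */
	unsigned int locks_mask = 4 - 1; /* 4 per-bucket locks        */
	unsigned int new_hash;

	for (new_hash = 0; new_hash < new_size; new_hash++) {
		unsigned int lower = lock_index(new_hash, locks_mask);
		unsigned int upper = lock_index(new_hash + new_size,
						locks_mask);

		/* With locks_mask == new_size - 1 the two indices always
		 * collide; nesting into the "second" lock would recurse.
		 */
		printf("bucket %u: lower lock %u, upper lock %u%s\n",
		       new_hash, lower, upper,
		       lower == upper ? "  <-- same lock" : "");
	}
	return 0;
}

Every iteration prints "same lock" here because new_size is a multiple
of locks_mask + 1; this is exactly the case the
old_bucket_lock1 != old_bucket_lock2 check above guards against. With
eight or more locks the two regions would select distinct locks and
the nested acquisition would be safe.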