X-Git-Url: https://git.karo-electronics.de/?a=blobdiff_plain;f=lib%2Fkernel_lock.c;h=e0fdfddb406ec3975673f150f46bd3302bc08599;hb=139eecf94cf5ab1f9749874cd362db5bff7dc09c;hp=bd2bc5d887b815e261ff82c4167e410966b659d3;hpb=54522b6613a03807f057fd567794a31267ef85cb;p=mv-sheeva.git

diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index bd2bc5d887b..e0fdfddb406 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -14,7 +14,7 @@
  * The 'big kernel semaphore'
  *
  * This mutex is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reaquired
+ * and unlock_kernel(). It is transparently dropped and reacquired
  * over schedule(). It is used to protect legacy code that hasn't
  * been migrated to a proper locking design yet.
  *
@@ -92,7 +92,7 @@ void __lockfunc unlock_kernel(void)
  * The 'big kernel lock'
  *
  * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reaquired
+ * and unlock_kernel(). It is transparently dropped and reacquired
  * over schedule(). It is used to protect legacy code that hasn't
  * been migrated to a proper locking design yet.
  *
@@ -177,6 +177,10 @@ static inline void __lock_kernel(void)
 
 static inline void __unlock_kernel(void)
 {
+	/*
+	 * the BKL is not covered by lockdep, so we open-code the
+	 * unlocking sequence (and thus avoid the dep-chain ops):
+	 */
 	_raw_spin_unlock(&kernel_flag);
 	preempt_enable();
 }
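
The comment text touched by the first two hunks describes the BKL's recursive take/release semantics: lock_kernel() and unlock_kernel() may nest, only the outermost pair actually acquires and releases the underlying lock, and the lock is dropped and reacquired around schedule(). As a rough illustration of the depth-counting part of that behaviour, here is a minimal userspace sketch using a pthread mutex as a stand-in; the names (model_lock_kernel, model_unlock_kernel, lock_depth, big_lock) are invented for the sketch, it is not the kernel's implementation, and the "dropped over schedule()" behaviour is not modelled.

/*
 * Userspace model of recursive BKL-style locking (illustrative only,
 * not the kernel API).  In this sketch a depth of -1 means "not held";
 * nested calls only adjust the per-thread depth counter.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread int lock_depth = -1;

static void model_lock_kernel(void)
{
	if (lock_depth < 0)		/* outermost call takes the lock */
		pthread_mutex_lock(&big_lock);
	lock_depth++;			/* nested calls only bump the depth */
}

static void model_unlock_kernel(void)
{
	if (--lock_depth < 0)		/* only the outermost unlock releases it */
		pthread_mutex_unlock(&big_lock);
}

int main(void)
{
	model_lock_kernel();		/* outer critical section */
	model_lock_kernel();		/* nested call, no deadlock */
	printf("depth while nested: %d\n", lock_depth);		/* prints 1 */
	model_unlock_kernel();
	model_unlock_kernel();		/* lock actually released here */
	printf("depth after release: %d\n", lock_depth);	/* prints -1 */
	return 0;
}

Build with something like "cc -pthread model.c" to try it. The third hunk's new comment is about a different detail: __unlock_kernel() open-codes _raw_spin_unlock() plus preempt_enable() instead of a plain spin_unlock(), because the BKL is not tracked by lockdep and the dependency-chain bookkeeping can therefore be skipped.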