rtmutex: Simplify and document try_to_take_rtmutex()
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index a620d4d08ca6c17580f0bbb76d7c3ac9129330b9..39c9f8075e14d124eec475f1190dfdc64b06017e 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -83,6 +83,47 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
 }
+
+/*
+ * Safe fastpath aware unlock:
+ * 1) Clear the waiters bit
+ * 2) Drop lock->wait_lock
+ * 3) Try to unlock the lock with cmpxchg
+ */
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+       __releases(lock->wait_lock)
+{
+       struct task_struct *owner = rt_mutex_owner(lock);
+
+       clear_rt_mutex_waiters(lock);
+       raw_spin_unlock(&lock->wait_lock);
+       /*
+        * If a new waiter comes in between the unlock and the cmpxchg
+        * we have two situations:
+        *
+        * unlock(wait_lock);
+        *                                      lock(wait_lock);
+        * cmpxchg(p, owner, 0) == owner
+        *                                      mark_rt_mutex_waiters(lock);
+        *                                      acquire(lock);
+        * or:
+        *
+        * unlock(wait_lock);
+        *                                      lock(wait_lock);
+        *                                      mark_rt_mutex_waiters(lock);
+        *
+        * cmpxchg(p, owner, 0) != owner
+        *                                      enqueue_waiter();
+        *                                      unlock(wait_lock);
+        * lock(wait_lock);
+        * wake waiter();
+        * unlock(wait_lock);
+        *                                      lock(wait_lock);
+        *                                      acquire(lock);
+        */
+       return rt_mutex_cmpxchg(lock, owner, NULL);
+}
+
 #else
 # define rt_mutex_cmpxchg(l,c,n)       (0)
 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
@@ -90,6 +131,17 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
 }
+
+/*
+ * Simple slow path only version: lock->owner is protected by lock->wait_lock.
+ */
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+       __releases(lock->wait_lock)
+{
+       lock->owner = NULL;
+       raw_spin_unlock(&lock->wait_lock);
+       return true;
+}
 #endif
 
 static inline int
@@ -260,27 +312,36 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
  */
 int max_lock_depth = 1024;
 
+static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
+{
+       return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
+}
+
 /*
  * Adjust the priority chain. Also used for deadlock detection.
  * Decreases task's usage by one - may thus free the task.
  *
- * @task: the task owning the mutex (owner) for which a chain walk is probably
- *       needed
+ * @task:      the task owning the mutex (owner) for which a chain walk is
+ *             probably needed
  * @deadlock_detect: do we have to carry out deadlock detection?
- * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
- *            things for a task that has just got its priority adjusted, and
- *            is waiting on a mutex)
+ * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
+ *             things for a task that has just got its priority adjusted, and
+ *             is waiting on a mutex)
+ * @next_lock: the mutex on which the owner of @orig_lock was blocked before
+ *             we dropped its pi_lock. Is never dereferenced, only used for
+ *             comparison to detect lock chain changes.
  * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
- *              its priority to the mutex owner (can be NULL in the case
- *              depicted above or if the top waiter is gone away and we are
- *              actually deboosting the owner)
- * @top_task: the current top waiter
+ *             its priority to the mutex owner (can be NULL in the case
+ *             depicted above or if the top waiter is gone away and we are
+ *             actually deboosting the owner)
+ * @top_task:  the current top waiter
  *
  * Returns 0 or -EDEADLK.
  */
 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      int deadlock_detect,
                                      struct rt_mutex *orig_lock,
+                                     struct rt_mutex *next_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
 {
@@ -314,7 +375,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                }
                put_task_struct(task);
 
-               return deadlock_detect ? -EDEADLK : 0;
+               return -EDEADLK;
        }
  retry:
        /*
@@ -338,6 +399,18 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;
 
+       /*
+        * We dropped all locks after taking a refcount on @task, so
+        * the task might have moved on in the lock chain or even left
+        * the chain completely and blocks now on an unrelated lock or
+        * on @orig_lock.
+        *
+        * We stored the lock on which @task was blocked in @next_lock,
+        * so we can detect the chain change.
+        */
+       if (next_lock != waiter->lock)
+               goto out_unlock_pi;
+
        /*
         * Drop out, when the task has no waiters. Note,
         * top_waiter can be NULL, when we are in the deboosting
@@ -377,7 +450,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
-               ret = deadlock_detect ? -EDEADLK : 0;
+               ret = -EDEADLK;
                goto out_unlock_pi;
        }
 
@@ -422,11 +495,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                __rt_mutex_adjust_prio(task);
        }
 
+       /*
+        * Check whether the task which owns the current lock is pi
+        * blocked itself. If yes we store a pointer to the lock for
+        * the lock chain change detection above. After we dropped
+        * task->pi_lock next_lock cannot be dereferenced anymore.
+        */
+       next_lock = task_blocked_on_lock(task);
+
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
        top_waiter = rt_mutex_top_waiter(lock);
        raw_spin_unlock(&lock->wait_lock);
 
+       /*
+        * We reached the end of the lock chain. Stop right here. No
+        * point to go back just to figure that out.
+        */
+       if (!next_lock)
+               goto out_put_task;
+
        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;
 
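
The chain walk and the next_lock check above can be illustrated with a reduced, hypothetical userspace model (single threaded, plain structs, no real locking; struct model_task, struct model_lock and walk_chain() are invented names, not kernel code). Before stepping to the next link the walker records which lock the owner is blocked on, and it gives up if that no longer matches what the task is actually blocked on:

#include <stdio.h>

struct model_lock;

struct model_task {
        const char *name;
        int prio;                               /* lower value = higher priority */
        struct model_lock *blocked_on;          /* lock this task waits for, or NULL */
};

struct model_lock {
        const char *name;
        struct model_task *owner;
};

/* Rough equivalent of task_blocked_on_lock(). */
static struct model_lock *blocked_on(struct model_task *t)
{
        return t ? t->blocked_on : NULL;
}

/*
 * Walk task -> lock -> owner -> lock ... and propagate the waiter's
 * priority.  @next_lock is the lock we expect @task to be blocked on;
 * if that changed, the chain we set out to walk no longer exists and
 * we stop, just like the real walk does after retaking its locks.
 */
static void walk_chain(struct model_task *task, struct model_lock *next_lock,
                       int prio)
{
        while (task) {
                if (blocked_on(task) != next_lock) {
                        printf("%s blocks on another lock now, stop\n", task->name);
                        return;
                }
                if (task->prio > prio) {
                        printf("boost %s: %d -> %d\n", task->name, task->prio, prio);
                        task->prio = prio;
                }
                if (!next_lock || !next_lock->owner)
                        return;                         /* end of the chain */
                task = next_lock->owner;
                next_lock = blocked_on(task);           /* record before "dropping locks" */
        }
}

int main(void)
{
        struct model_lock L1 = { "L1", NULL }, L2 = { "L2", NULL };
        struct model_task A = { "A", 10, &L1 };         /* A waits for L1 */
        struct model_task B = { "B", 20, &L2 };         /* B owns L1, waits for L2 */
        struct model_task C = { "C", 30, NULL };        /* C owns L2 */

        L1.owner = &B;
        L2.owner = &C;

        /* A blocked on L1: walk starts at L1's owner B, expected on L2. */
        walk_chain(&B, blocked_on(&B), A.prio);
        return 0;
}

In the kernel the comparison matters because all locks are dropped between recording next_lock and looking at waiter->lock again; in this single threaded model the check can never fire, it is only there to show where it sits in the walk.
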
@@ -445,76 +533,119 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
  *
  * Must be called with lock->wait_lock held.
  *
- * @lock:   the lock to be acquired.
- * @task:   the task which wants to acquire the lock
- * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
+ * @lock:   The lock to be acquired.
+ * @task:   The task which wants to acquire the lock
+ * @waiter: The waiter that is queued to the lock's wait list if the
+ *         callsite called task_blocked_on_lock(), otherwise NULL
  */
 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
-               struct rt_mutex_waiter *waiter)
+                               struct rt_mutex_waiter *waiter)
 {
+       unsigned long flags;
+
        /*
-        * We have to be careful here if the atomic speedups are
-        * enabled, such that, when
-        *  - no other waiter is on the lock
-        *  - the lock has been released since we did the cmpxchg
-        * the lock can be released or taken while we are doing the
-        * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
+        * Before testing whether we can acquire @lock, we set the
+        * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
+        * other tasks which try to modify @lock into the slow path
+        * and they serialize on @lock->wait_lock.
+        *
+        * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
+        * as explained at the top of this file if and only if:
         *
-        * The atomic acquire/release aware variant of
-        * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
-        * the WAITERS bit, the atomic release / acquire can not
-        * happen anymore and lock->wait_lock protects us from the
-        * non-atomic case.
+        * - There is a lock owner. The caller must fixup the
+        *   transient state if it does a trylock or leaves the lock
+        *   function due to a signal or timeout.
         *
-        * Note, that this might set lock->owner =
-        * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
-        * any more. This is fixed up when we take the ownership.
-        * This is the transitional state explained at the top of this file.
+        * - @task acquires the lock and there are no other
+        *   waiters. This is undone in rt_mutex_set_owner(@task) at
+        *   the end of this function.
         */
        mark_rt_mutex_waiters(lock);
 
+       /*
+        * If @lock has an owner, give up.
+        */
        if (rt_mutex_owner(lock))
                return 0;
 
        /*
-        * It will get the lock because of one of these conditions:
-        * 1) there is no waiter
-        * 2) higher priority than waiters
-        * 3) it is top waiter
+        * If @waiter != NULL, @task has already enqueued the waiter
+        * into @lock waiter list. If @waiter == NULL then this is a
+        * trylock attempt.
         */
-       if (rt_mutex_has_waiters(lock)) {
-               if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
-                       if (!waiter || waiter != rt_mutex_top_waiter(lock))
-                               return 0;
-               }
-       }
-
-       if (waiter || rt_mutex_has_waiters(lock)) {
-               unsigned long flags;
-               struct rt_mutex_waiter *top;
-
-               raw_spin_lock_irqsave(&task->pi_lock, flags);
+       if (waiter) {
+               /*
+                * If waiter is not the highest priority waiter of
+                * @lock, give up.
+                */
+               if (waiter != rt_mutex_top_waiter(lock))
+                       return 0;
 
-               /* remove the queued waiter. */
-               if (waiter) {
-                       rt_mutex_dequeue(lock, waiter);
-                       task->pi_blocked_on = NULL;
-               }
+               /*
+                * We can acquire the lock. Remove the waiter from the
+                * lock waiters list.
+                */
+               rt_mutex_dequeue(lock, waiter);
 
+       } else {
                /*
-                * We have to enqueue the top waiter(if it exists) into
-                * task->pi_waiters list.
+                * If the lock has waiters already we check whether @task is
+                * eligible to take over the lock.
+                *
+                * If there are no other waiters, @task can acquire
+                * the lock.  @task->pi_blocked_on is NULL, so it does
+                * not need to be dequeued.
                 */
                if (rt_mutex_has_waiters(lock)) {
-                       top = rt_mutex_top_waiter(lock);
-                       rt_mutex_enqueue_pi(task, top);
+                       /*
+                        * If @task->prio is greater than or equal to
+                        * the top waiter priority (kernel view),
+                        * @task lost.
+                        */
+                       if (task->prio >= rt_mutex_top_waiter(lock)->prio)
+                               return 0;
+
+                       /*
+                        * The current top waiter stays enqueued. We
+                        * don't have to change anything in the lock
+                        * waiters order.
+                        */
+               } else {
+                       /*
+                        * No waiters. Take the lock without the
+                        * pi_lock dance. @task->pi_blocked_on is NULL
+                        * and we have no waiters to enqueue in @task
+                        * pi waiters list.
+                        */
+                       goto takeit;
                }
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        }
 
+       /*
+        * Clear @task->pi_blocked_on. Requires protection by
+        * @task->pi_lock. Redundant operation for the @waiter == NULL
+        * case, but conditionals are more expensive than a redundant
+        * store.
+        */
+       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       task->pi_blocked_on = NULL;
+       /*
+        * Finish the lock acquisition. @task is the new owner. If
+        * other waiters exist we have to insert the highest priority
+        * waiter into @task->pi_waiters list.
+        */
+       if (rt_mutex_has_waiters(lock))
+               rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
+       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+takeit:
        /* We got the lock. */
        debug_rt_mutex_lock(lock);
 
+       /*
+        * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
+        * are still waiters or clears it.
+        */
        rt_mutex_set_owner(lock, task);
 
        rt_mutex_deadlock_account_lock(lock, task);
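
The acquisition rules documented above reduce to a small decision function. Below is a hypothetical userspace condensation of try_to_take_rt_mutex() covering only the owner test, the enqueued-waiter versus trylock distinction and the priority comparison; the waiters bit, pi_lock handling and the pi waiters list are deliberately left out, and can_take() is an invented name. Lower prio values mean higher priority, as in the kernel:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct waiter {
        int prio;
};

struct lock {
        void *owner;
        struct waiter *top_waiter;      /* highest priority queued waiter, or NULL */
};

/*
 * Reduced acquisition rules: fail if the lock is owned; an enqueued
 * waiter may take it only as the top waiter; a trylock caller must
 * beat the top waiter's priority (lower value wins); with no owner
 * and no waiters anybody may take it.
 */
static bool can_take(struct lock *l, int task_prio, struct waiter *w)
{
        if (l->owner)
                return false;

        if (w)                                  /* enqueued waiter path */
                return w == l->top_waiter;

        if (l->top_waiter)                      /* trylock path, lock has waiters */
                return task_prio < l->top_waiter->prio;

        return true;
}

int main(void)
{
        struct waiter top = { .prio = 10 }, other = { .prio = 30 };
        struct lock contended = { .owner = NULL, .top_waiter = &top };
        struct lock owned = { .owner = &top, .top_waiter = NULL };
        struct lock idle = { .owner = NULL, .top_waiter = NULL };

        assert(!can_take(&owned, 1, NULL));             /* has an owner: never */
        assert(can_take(&contended, 10, &top));         /* top waiter wins */
        assert(!can_take(&contended, 30, &other));      /* non-top waiter loses */
        assert(can_take(&contended, 5, NULL));          /* trylock beats the top waiter */
        assert(!can_take(&contended, 10, NULL));        /* prio >= top waiter: lose */
        assert(can_take(&idle, 99, NULL));              /* free and uncontended */
        return 0;
}
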
@@ -536,8 +667,9 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 {
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
-       unsigned long flags;
+       struct rt_mutex *next_lock;
        int chain_walk = 0, res;
+       unsigned long flags;
 
        /*
         * Early deadlock detection. We really don't want the task to
@@ -548,7 +680,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
         * which is wrong, as the other waiter is not in a deadlock
         * situation.
         */
-       if (detect_deadlock && owner == task)
+       if (owner == task)
                return -EDEADLK;
 
        raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -569,20 +701,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        if (!owner)
                return 0;
 
+       raw_spin_lock_irqsave(&owner->pi_lock, flags);
        if (waiter == rt_mutex_top_waiter(lock)) {
-               raw_spin_lock_irqsave(&owner->pi_lock, flags);
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);
 
                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
-               raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
-       }
-       else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
+       } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
                chain_walk = 1;
+       }
+
+       /* Store the lock on which owner is blocked or NULL */
+       next_lock = task_blocked_on_lock(owner);
 
-       if (!chain_walk)
+       raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+       /*
+        * Even if full deadlock detection is on, if the owner is not
+        * blocked itself, we can avoid finding this out in the chain
+        * walk.
+        */
+       if (!chain_walk || !next_lock)
                return 0;
 
        /*
@@ -594,8 +734,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
        raw_spin_unlock(&lock->wait_lock);
 
-       res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
-                                        task);
+       res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
+                                        next_lock, waiter, task);
 
        raw_spin_lock(&lock->wait_lock);
 
@@ -605,7 +745,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 /*
  * Wake up the next waiter on the lock.
  *
- * Remove the top waiter from the current tasks waiter list and wake it up.
+ * Remove the top waiter from the current task's pi waiters list and

+ * wake it up.
  *
  * Called with lock->wait_lock held.
  */
@@ -626,10 +767,23 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
         */
        rt_mutex_dequeue_pi(current, waiter);
 
-       rt_mutex_set_owner(lock, NULL);
+       /*
+        * As we are waking up the top waiter, and the waiter stays
+        * queued on the lock until it gets the lock, this lock
+        * obviously has waiters. Just set the bit here and this has
+        * the added benefit of forcing all new tasks into the
+        * slow path making sure no task of lower priority than
+        * the top waiter can steal this lock.
+        */
+       lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
 
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
+       /*
+        * It's safe to dereference waiter as it cannot go away as
+        * long as we hold lock->wait_lock. The waiter task needs to
+        * acquire it in order to dequeue the waiter.
+        */
        wake_up_process(waiter->task);
 }
 
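
The hand-over trick of leaving RT_MUTEX_HAS_WAITERS in lock->owner can be modelled with C11 atomics: once the low bit is set, a fastpath style cmpxchg against 0 cannot succeed any more, so a would-be lock stealer is pushed into the (not modelled) slow path where the top waiter's priority is honoured. This is an illustrative userspace sketch with invented names (struct mlock, fast_trylock()), not the kernel fast path:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS     1UL             /* low bit of the owner word */

struct mlock {
        atomic_uintptr_t owner;         /* task value | HAS_WAITERS, 0 when free */
};

/* Fastpath-style acquisition: only succeeds if the owner word is exactly 0. */
static bool fast_trylock(struct mlock *l, uintptr_t me)
{
        uintptr_t expected = 0;

        return atomic_compare_exchange_strong(&l->owner, &expected, me);
}

int main(void)
{
        struct mlock free_lock = { .owner = 0 };
        struct mlock handover  = { .owner = HAS_WAITERS };

        /* Owner word 0: a newcomer could grab the lock via the fast path. */
        printf("steal with owner == 0:           %d\n",
               fast_trylock(&free_lock, 0x1000));

        /* Owner word HAS_WAITERS (as handed over above): the cmpxchg
         * fails and the newcomer must take the slow path. */
        printf("steal with owner == HAS_WAITERS: %d\n",
               fast_trylock(&handover, 0x2000));
        return 0;
}
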
@@ -644,8 +798,8 @@ static void remove_waiter(struct rt_mutex *lock,
 {
        int first = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
+       struct rt_mutex *next_lock = NULL;
        unsigned long flags;
-       int chain_walk = 0;
 
        raw_spin_lock_irqsave(&current->pi_lock, flags);
        rt_mutex_dequeue(lock, waiter);
@@ -669,13 +823,13 @@ static void remove_waiter(struct rt_mutex *lock,
                }
                __rt_mutex_adjust_prio(owner);
 
-               if (owner->pi_blocked_on)
-                       chain_walk = 1;
+               /* Store the lock on which owner is blocked or NULL */
+               next_lock = task_blocked_on_lock(owner);
 
                raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        }
 
-       if (!chain_walk)
+       if (!next_lock)
                return;
 
        /* gets dropped in rt_mutex_adjust_prio_chain()! */
@@ -683,7 +837,7 @@ static void remove_waiter(struct rt_mutex *lock,
 
        raw_spin_unlock(&lock->wait_lock);
 
-       rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
+       rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
 
        raw_spin_lock(&lock->wait_lock);
 }
@@ -696,6 +850,7 @@ static void remove_waiter(struct rt_mutex *lock,
 void rt_mutex_adjust_pi(struct task_struct *task)
 {
        struct rt_mutex_waiter *waiter;
+       struct rt_mutex *next_lock;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -706,12 +861,13 @@ void rt_mutex_adjust_pi(struct task_struct *task)
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }
-
+       next_lock = waiter->lock;
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);
-       rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
+
+       rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
 }
 
 /**
@@ -763,6 +919,26 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
        return ret;
 }
 
+static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+                                    struct rt_mutex_waiter *w)
+{
+       /*
+        * If the result is not -EDEADLOCK or the caller requested
+        * deadlock detection, nothing to do here.
+        */
+       if (res != -EDEADLOCK || detect_deadlock)
+               return;
+
+       /*
+        * Yell loudly and stop the task right here.
+        */
+       rt_mutex_print_deadlock(w);
+       while (1) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule();
+       }
+}
+
 /*
  * Slow path lock function:
  */
@@ -802,8 +978,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
        set_current_state(TASK_RUNNING);
 
-       if (unlikely(ret))
+       if (unlikely(ret)) {
                remove_waiter(lock, &waiter);
+               rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
+       }
 
        /*
         * try_to_take_rt_mutex() sets the waiter bit
@@ -825,22 +1003,31 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 /*
  * Slow path try-lock function:
  */
-static inline int
-rt_mutex_slowtrylock(struct rt_mutex *lock)
+static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
-       int ret = 0;
+       int ret;
+
+       /*
+        * If the lock already has an owner we fail to get the lock.
+        * This can be done without taking the @lock->wait_lock as
+        * it is only being read, and this is a trylock anyway.
+        */
+       if (rt_mutex_owner(lock))
+               return 0;
 
+       /*
+        * The mutex currently has no owner. Lock the wait lock and
+        * try to acquire the lock.
+        */
        raw_spin_lock(&lock->wait_lock);
 
-       if (likely(rt_mutex_owner(lock) != current)) {
+       ret = try_to_take_rt_mutex(lock, current, NULL);
 
-               ret = try_to_take_rt_mutex(lock, current, NULL);
-               /*
-                * try_to_take_rt_mutex() sets the lock waiters
-                * bit unconditionally. Clean this up.
-                */
-               fixup_rt_mutex_waiters(lock);
-       }
+       /*
+        * try_to_take_rt_mutex() sets the lock waiters bit
+        * unconditionally. Clean this up.
+        */
+       fixup_rt_mutex_waiters(lock);
 
        raw_spin_unlock(&lock->wait_lock);
 
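
The new lockless owner check follows the usual "peek without the lock, confirm under the lock" trylock pattern. A hypothetical userspace sketch, with a pthread mutex standing in for lock->wait_lock and an atomic pointer for the owner field (struct mlock and model_trylock() are invented names):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mlock {
        pthread_mutex_t wait_lock;      /* stands in for lock->wait_lock */
        void *_Atomic owner;            /* NULL when the lock is free */
};

/*
 * A stale non-NULL read in the unlocked peek can only turn the trylock
 * into a failure, which is always a legal answer for a trylock, so the
 * owner field may be inspected without wait_lock first.
 */
static bool model_trylock(struct mlock *l, void *me)
{
        bool taken = false;

        if (atomic_load(&l->owner))             /* lockless early out */
                return false;

        pthread_mutex_lock(&l->wait_lock);      /* recheck under the lock */
        if (!atomic_load(&l->owner)) {
                atomic_store(&l->owner, me);
                taken = true;
        }
        pthread_mutex_unlock(&l->wait_lock);

        return taken;
}

int main(void)
{
        struct mlock l = { .wait_lock = PTHREAD_MUTEX_INITIALIZER, .owner = NULL };
        int task;

        return model_trylock(&l, &task) ? 0 : 1;        /* exits 0: lock taken */
}
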
@@ -859,12 +1046,49 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
 
        rt_mutex_deadlock_account_unlock(current);
 
-       if (!rt_mutex_has_waiters(lock)) {
-               lock->owner = NULL;
-               raw_spin_unlock(&lock->wait_lock);
-               return;
+       /*
+        * We must be careful here if the fast path is enabled. If we
+        * have no waiters queued we cannot set owner to NULL here
+        * because of:
+        *
+        * foo->lock->owner = NULL;
+        *                      rtmutex_lock(foo->lock);   <- fast path
+        *                      free = atomic_dec_and_test(foo->refcnt);
+        *                      rtmutex_unlock(foo->lock); <- fast path
+        *                      if (free)
+        *                              kfree(foo);
+        * raw_spin_unlock(foo->lock->wait_lock);
+        *
+        * So for the fastpath enabled kernel:
+        *
+        * Nothing can set the waiters bit as long as we hold
+        * lock->wait_lock. So we do the following sequence:
+        *
+        *      owner = rt_mutex_owner(lock);
+        *      clear_rt_mutex_waiters(lock);
+        *      raw_spin_unlock(&lock->wait_lock);
+        *      if (cmpxchg(&lock->owner, owner, 0) == owner)
+        *              return;
+        *      goto retry;
+        *
+        * The fastpath disabled variant is simple as all access to
+        * lock->owner is serialized by lock->wait_lock:
+        *
+        *      lock->owner = NULL;
+        *      raw_spin_unlock(&lock->wait_lock);
+        */
+       while (!rt_mutex_has_waiters(lock)) {
+               /* Drops lock->wait_lock ! */
+               if (unlock_rt_mutex_safe(lock) == true)
+                       return;
+               /* Relock the rtmutex and try again */
+               raw_spin_lock(&lock->wait_lock);
        }
 
+       /*
+        * The wakeup next waiter path does not suffer from the above
+        * race. See the comments there.
+        */
        wakeup_next_waiter(lock);
 
        raw_spin_unlock(&lock->wait_lock);
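
The waiters-bit protocol used by the slow unlock path fits into a compact C11 atomics model: mark_waiters() is the cmpxchg loop that ORs the bit in, safe_unlock() mirrors unlock_rt_mutex_safe() (clear the bit, drop the wait lock, then try to cmpxchg the owner word to 0), and slow_unlock() retries until that succeeds. This is a hypothetical userspace sketch with invented names; the real slow path wakes the top waiter instead of looping when waiters are actually queued:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define HAS_WAITERS     1UL

struct mlock {
        pthread_mutex_t wait_lock;
        atomic_uintptr_t owner;         /* task value | HAS_WAITERS, 0 when free */
};

/* mark_rt_mutex_waiters() in miniature: OR the bit in with a cmpxchg loop. */
static void mark_waiters(struct mlock *l)
{
        uintptr_t owner = atomic_load(&l->owner);

        while (!atomic_compare_exchange_weak(&l->owner, &owner,
                                             owner | HAS_WAITERS))
                ;       /* 'owner' was refreshed with the current value, retry */
}

/*
 * unlock_rt_mutex_safe() in miniature: clear the waiters bit, drop the
 * wait lock, then try to release with a cmpxchg.  If a new contender
 * set HAS_WAITERS after the wait lock was dropped, the cmpxchg fails
 * and the caller must retry with the wait lock held again.
 */
static bool safe_unlock(struct mlock *l)
{
        uintptr_t owner = atomic_load(&l->owner) & ~HAS_WAITERS;

        atomic_store(&l->owner, owner);                 /* 1) clear the bit  */
        pthread_mutex_unlock(&l->wait_lock);            /* 2) drop wait_lock */
        /* 3) try to release the lock */
        return atomic_compare_exchange_strong(&l->owner, &owner, 0);
}

/* Retry skeleton of the slow unlock; the real code wakes the top waiter. */
static void slow_unlock(struct mlock *l)
{
        pthread_mutex_lock(&l->wait_lock);
        while (!safe_unlock(l))
                pthread_mutex_lock(&l->wait_lock);      /* relock and retry */
}

int main(void)
{
        struct mlock l = {
                .wait_lock = PTHREAD_MUTEX_INITIALIZER,
                .owner     = 0x1000,                    /* held by a fictitious task */
        };

        mark_waiters(&l);       /* a contender announces itself ...        */
        slow_unlock(&l);        /* ... and the owner still releases safely */

        return atomic_load(&l.owner) != 0;              /* exits 0: fully released */
}
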
@@ -1112,7 +1336,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                return 1;
        }
 
-       ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+       /* We enforce deadlock detection for futexes */
+       ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
 
        if (ret && !rt_mutex_owner(lock)) {
                /*