kernel/rtmutex.c
1 /*
2  * RT-Mutexes: simple blocking mutual exclusion locks with PI support
3  *
4  * started by Ingo Molnar and Thomas Gleixner.
5  *
6  *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7  *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8  *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
9  *  Copyright (C) 2006 Esben Nielsen
10  *
11  *  See Documentation/rt-mutex-design.txt for details.
12  */
13 #include <linux/spinlock.h>
14 #include <linux/export.h>
15 #include <linux/sched.h>
16 #include <linux/timer.h>
17
18 #include "rtmutex_common.h"
19
20 /*
21  * lock->owner state tracking:
22  *
23  * lock->owner holds the task_struct pointer of the owner. Bit 0
24  * is used to keep track of the "lock has waiters" state.
25  *
26  * owner        bit0
27  * NULL         0       lock is free (fast acquire possible)
28  * NULL         1       lock is free and has waiters and the top waiter
29  *                              is going to take the lock*
30  * taskpointer  0       lock is held (fast release possible)
31  * taskpointer  1       lock is held and has waiters**
32  *
33  * The fast atomic compare exchange based acquire and release is only
34  * possible when bit 0 of lock->owner is 0.
35  *
36  * (*) It also can be a transitional state when grabbing the lock
37  * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
38  * we need to set bit 0 before looking at the lock, and the owner may be
39  * NULL during this small window, hence this can be a transitional state.
40  *
41  * (**) There is a short window during which bit 0 is set but there are no
42  * waiters. This can happen when grabbing the lock in the slow path.
43  * To prevent a cmpxchg of the owner releasing the lock, we need to
44  * set this bit before looking at the lock.
45  */
46
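/*
 * Illustrative sketch (not part of the original source): the helpers below
 * keep the owner task_struct pointer and the "has waiters" flag in a single
 * word. Conceptually:
 *
 *	word = (unsigned long)task | RT_MUTEX_HAS_WAITERS;
 *	task = (struct task_struct *)(word & ~RT_MUTEX_HAS_WAITERS);
 *
 * rt_mutex_owner() in rtmutex_common.h performs roughly the second step.
 */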
47 static void
48 rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
49 {
50         unsigned long val = (unsigned long)owner;
51
52         if (rt_mutex_has_waiters(lock))
53                 val |= RT_MUTEX_HAS_WAITERS;
54
55         lock->owner = (struct task_struct *)val;
56 }
57
58 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
59 {
60         lock->owner = (struct task_struct *)
61                         ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
62 }
63
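/*
 * Clear a stale "has waiters" bit: try_to_take_rt_mutex() sets the bit
 * unconditionally, so the slowpath callers use this helper to drop it again
 * when no waiter is actually queued (see the fixup comments further down).
 */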
64 static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
65 {
66         if (!rt_mutex_has_waiters(lock))
67                 clear_rt_mutex_waiters(lock);
68 }
69
70 /*
71  * We can speed up the acquire/release, if the architecture
72  * supports cmpxchg and if there's no debugging state to be set up
73  */
74 #if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
75 # define rt_mutex_cmpxchg(l,c,n)        (cmpxchg(&l->owner, c, n) == c)
76 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
77 {
78         unsigned long owner, *p = (unsigned long *) &lock->owner;
79
80         do {
81                 owner = *p;
82         } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
83 }
84 #else
85 # define rt_mutex_cmpxchg(l,c,n)        (0)
86 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
87 {
88         lock->owner = (struct task_struct *)
89                         ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
90 }
91 #endif
92
93 /*
94  * Calculate task priority from the waiter list priority
95  *
96  * Return task->normal_prio when the waiter list is empty or when
97  * the waiter is not allowed to do priority boosting
98  */
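/*
 * Note: lower numerical prio values mean higher scheduling priority, so the
 * min() below returns the boosted priority whenever the top pi waiter
 * outranks task->normal_prio.
 */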
99 int rt_mutex_getprio(struct task_struct *task)
100 {
101         if (likely(!task_has_pi_waiters(task)))
102                 return task->normal_prio;
103
104         return min(task_top_pi_waiter(task)->pi_list_entry.prio,
105                    task->normal_prio);
106 }
107
108 /*
109  * Adjust the priority of a task, after its pi_waiters got modified.
110  *
111  * This can be both boosting and unboosting. task->pi_lock must be held.
112  */
113 static void __rt_mutex_adjust_prio(struct task_struct *task)
114 {
115         int prio = rt_mutex_getprio(task);
116
117         if (task->prio != prio)
118                 rt_mutex_setprio(task, prio);
119 }
120
121 /*
122  * Adjust task priority (undo boosting). Called from the exit path of
123  * rt_mutex_slowunlock() and rt_mutex_slowlock().
124  *
125  * (Note: We do this outside of the protection of lock->wait_lock to
126  * allow the lock to be taken while or before we readjust the priority
127  * of the task. We do not use the spin_xx_mutex() variants here as we are
128  * outside of the debug path.)
129  */
130 static void rt_mutex_adjust_prio(struct task_struct *task)
131 {
132         unsigned long flags;
133
134         raw_spin_lock_irqsave(&task->pi_lock, flags);
135         __rt_mutex_adjust_prio(task);
136         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
137 }
138
139 /*
140  * Max number of times we'll walk the boosting chain:
141  */
142 int max_lock_depth = 1024;
143
144 /*
145  * Adjust the priority chain. Also used for deadlock detection.
146  * Decreases the task's reference count by one - may thus free the task.
147  * Returns 0 or -EDEADLK.
148  */
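/*
 * Rough shape of the walk below (a summary, not new behaviour):
 *
 *	again/retry:
 *		take task->pi_lock and look at task->pi_blocked_on
 *		trylock that waiter's lock->wait_lock (restart on contention)
 *		requeue the waiter with the task's current priority
 *		(de)boost the lock owner and continue the walk with it
 */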
149 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
150                                       int deadlock_detect,
151                                       struct rt_mutex *orig_lock,
152                                       struct rt_mutex_waiter *orig_waiter,
153                                       struct task_struct *top_task)
154 {
155         struct rt_mutex *lock;
156         struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
157         int detect_deadlock, ret = 0, depth = 0;
158         unsigned long flags;
159
160         detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
161                                                          deadlock_detect);
162
163         /*
164          * The (de)boosting is a step by step approach with a lot of
165          * pitfalls. We want this to be preemptible and we want to hold a
166          * maximum of two locks per step. So we have to check
167          * carefully whether things change under us.
168          */
169  again:
170         if (++depth > max_lock_depth) {
171                 static int prev_max;
172
173                 /*
174                  * Print this only once. If the admin changes the limit,
175                  * print a new message when reaching the limit again.
176                  */
177                 if (prev_max != max_lock_depth) {
178                         prev_max = max_lock_depth;
179                         printk(KERN_WARNING "Maximum lock depth %d reached "
180                                "task: %s (%d)\n", max_lock_depth,
181                                top_task->comm, task_pid_nr(top_task));
182                 }
183                 put_task_struct(task);
184
185                 return deadlock_detect ? -EDEADLK : 0;
186         }
187  retry:
188         /*
189          * The task cannot go away as we did a get_task_struct() before!
190          */
191         raw_spin_lock_irqsave(&task->pi_lock, flags);
192
193         waiter = task->pi_blocked_on;
194         /*
195          * Check whether the end of the boosting chain has been
196          * reached or the state of the chain has changed while we
197          * dropped the locks.
198          */
199         if (!waiter)
200                 goto out_unlock_pi;
201
202         /*
203          * Check the orig_waiter state. After we dropped the locks,
204          * the previous owner of the lock might have released the lock.
205          */
206         if (orig_waiter && !rt_mutex_owner(orig_lock))
207                 goto out_unlock_pi;
208
209         /*
210          * Drop out when the task has no pi waiters. Note that
211          * top_waiter can be NULL when we are in deboosting
212          * mode!
213          */
214         if (top_waiter && (!task_has_pi_waiters(task) ||
215                            top_waiter != task_top_pi_waiter(task)))
216                 goto out_unlock_pi;
217
218         /*
219          * When deadlock detection is off, check whether further
220          * priority adjustment is necessary.
221          */
222         if (!detect_deadlock && waiter->list_entry.prio == task->prio)
223                 goto out_unlock_pi;
224
225         lock = waiter->lock;
226         if (!raw_spin_trylock(&lock->wait_lock)) {
227                 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
228                 cpu_relax();
229                 goto retry;
230         }
231
232         /* Deadlock detection */
233         if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
234                 debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
235                 raw_spin_unlock(&lock->wait_lock);
236                 ret = deadlock_detect ? -EDEADLK : 0;
237                 goto out_unlock_pi;
238         }
239
240         top_waiter = rt_mutex_top_waiter(lock);
241
242         /* Requeue the waiter */
243         plist_del(&waiter->list_entry, &lock->wait_list);
244         waiter->list_entry.prio = task->prio;
245         plist_add(&waiter->list_entry, &lock->wait_list);
246
247         /* Release the task */
248         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
249         if (!rt_mutex_owner(lock)) {
250                 /*
251                  * If the requeue above changed the top waiter, then we need
252                  * to wake the new top waiter up to try to get the lock.
253                  */
254
255                 if (top_waiter != rt_mutex_top_waiter(lock))
256                         wake_up_process(rt_mutex_top_waiter(lock)->task);
257                 raw_spin_unlock(&lock->wait_lock);
258                 goto out_put_task;
259         }
260         put_task_struct(task);
261
262         /* Grab the next task */
263         task = rt_mutex_owner(lock);
264         get_task_struct(task);
265         raw_spin_lock_irqsave(&task->pi_lock, flags);
266
267         if (waiter == rt_mutex_top_waiter(lock)) {
268                 /* Boost the owner */
269                 plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
270                 waiter->pi_list_entry.prio = waiter->list_entry.prio;
271                 plist_add(&waiter->pi_list_entry, &task->pi_waiters);
272                 __rt_mutex_adjust_prio(task);
273
274         } else if (top_waiter == waiter) {
275                 /* Deboost the owner */
276                 plist_del(&waiter->pi_list_entry, &task->pi_waiters);
277                 waiter = rt_mutex_top_waiter(lock);
278                 waiter->pi_list_entry.prio = waiter->list_entry.prio;
279                 plist_add(&waiter->pi_list_entry, &task->pi_waiters);
280                 __rt_mutex_adjust_prio(task);
281         }
282
283         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
284
285         top_waiter = rt_mutex_top_waiter(lock);
286         raw_spin_unlock(&lock->wait_lock);
287
288         if (!detect_deadlock && waiter != top_waiter)
289                 goto out_put_task;
290
291         goto again;
292
293  out_unlock_pi:
294         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
295  out_put_task:
296         put_task_struct(task);
297
298         return ret;
299 }
300
301 /*
302  * Try to take an rt-mutex
303  *
304  * Must be called with lock->wait_lock held.
305  *
306  * @lock:   the lock to be acquired.
307  * @task:   the task which wants to acquire the lock
308  * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
309  */
310 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
311                 struct rt_mutex_waiter *waiter)
312 {
313         /*
314          * We have to be careful here if the atomic speedups are
315          * enabled: when
316          *  - no other waiter is on the lock
317          *  - the lock has been released since we did the cmpxchg
318          * the lock can be released or taken while we are doing the
319          * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
320          *
321          * The atomic acquire/release aware variant of
322          * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
323          * the WAITERS bit, the atomic release / acquire can not
324          * happen anymore and lock->wait_lock protects us from the
325          * non-atomic case.
326          *
327          * Note, that this might set lock->owner =
328          * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
329          * any more. This is fixed up when we take the ownership.
330          * This is the transitional state explained at the top of this file.
331          */
332         mark_rt_mutex_waiters(lock);
333
334         if (rt_mutex_owner(lock))
335                 return 0;
336
337         /*
338          * The task will get the lock if one of these conditions holds:
339          * 1) there is no other waiter
340          * 2) the task has a higher priority than all waiters
341          * 3) the task is the top waiter itself
342          */
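	/*
	 * Note: a numerically greater prio value means a lower priority, so
	 * the check below refuses the lock when the task does not outrank
	 * the top waiter and is not the top waiter itself.
	 */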
343         if (rt_mutex_has_waiters(lock)) {
344                 if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
345                         if (!waiter || waiter != rt_mutex_top_waiter(lock))
346                                 return 0;
347                 }
348         }
349
350         if (waiter || rt_mutex_has_waiters(lock)) {
351                 unsigned long flags;
352                 struct rt_mutex_waiter *top;
353
354                 raw_spin_lock_irqsave(&task->pi_lock, flags);
355
356                 /* remove the queued waiter. */
357                 if (waiter) {
358                         plist_del(&waiter->list_entry, &lock->wait_list);
359                         task->pi_blocked_on = NULL;
360                 }
361
362                 /*
363                  * We have to enqueue the top waiter (if it exists) into
364                  * the task->pi_waiters list.
365                  */
366                 if (rt_mutex_has_waiters(lock)) {
367                         top = rt_mutex_top_waiter(lock);
368                         top->pi_list_entry.prio = top->list_entry.prio;
369                         plist_add(&top->pi_list_entry, &task->pi_waiters);
370                 }
371                 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
372         }
373
374         /* We got the lock. */
375         debug_rt_mutex_lock(lock);
376
377         rt_mutex_set_owner(lock, task);
378
379         rt_mutex_deadlock_account_lock(lock, task);
380
381         return 1;
382 }
383
384 /*
385  * Task blocks on lock.
386  *
387  * Prepare waiter and propagate pi chain
388  *
389  * This must be called with lock->wait_lock held.
390  */
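/*
 * In short (a summary of the code below): queue the waiter on
 * lock->wait_list, record it in task->pi_blocked_on, and if it became the
 * top waiter boost the owner and, when the owner is itself blocked, walk
 * the pi chain.
 */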
391 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
392                                    struct rt_mutex_waiter *waiter,
393                                    struct task_struct *task,
394                                    int detect_deadlock)
395 {
396         struct task_struct *owner = rt_mutex_owner(lock);
397         struct rt_mutex_waiter *top_waiter = waiter;
398         unsigned long flags;
399         int chain_walk = 0, res;
400
401         raw_spin_lock_irqsave(&task->pi_lock, flags);
402         __rt_mutex_adjust_prio(task);
403         waiter->task = task;
404         waiter->lock = lock;
405         plist_node_init(&waiter->list_entry, task->prio);
406         plist_node_init(&waiter->pi_list_entry, task->prio);
407
408         /* Get the top priority waiter on the lock */
409         if (rt_mutex_has_waiters(lock))
410                 top_waiter = rt_mutex_top_waiter(lock);
411         plist_add(&waiter->list_entry, &lock->wait_list);
412
413         task->pi_blocked_on = waiter;
414
415         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
416
417         if (!owner)
418                 return 0;
419
420         if (waiter == rt_mutex_top_waiter(lock)) {
421                 raw_spin_lock_irqsave(&owner->pi_lock, flags);
422                 plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
423                 plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
424
425                 __rt_mutex_adjust_prio(owner);
426                 if (owner->pi_blocked_on)
427                         chain_walk = 1;
428                 raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
429         }
430         else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
431                 chain_walk = 1;
432
433         if (!chain_walk)
434                 return 0;
435
436         /*
437          * The owner can't disappear while holding a lock,
438          * so the owner struct is protected by wait_lock.
439          * Gets dropped in rt_mutex_adjust_prio_chain()!
440          */
441         get_task_struct(owner);
442
443         raw_spin_unlock(&lock->wait_lock);
444
445         res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
446                                          task);
447
448         raw_spin_lock(&lock->wait_lock);
449
450         return res;
451 }
452
453 /*
454  * Wake up the next waiter on the lock.
455  *
456  * Remove the top waiter from the current task's pi_waiters list and wake it up.
457  *
458  * Called with lock->wait_lock held.
459  */
460 static void wakeup_next_waiter(struct rt_mutex *lock)
461 {
462         struct rt_mutex_waiter *waiter;
463         unsigned long flags;
464
465         raw_spin_lock_irqsave(&current->pi_lock, flags);
466
467         waiter = rt_mutex_top_waiter(lock);
468
469         /*
470          * Remove it from current->pi_waiters. We do not adjust a
471          * possible priority boost right now. We execute wakeup in the
472          * boosted mode and go back to normal after releasing
473          * lock->wait_lock.
474          */
475         plist_del(&waiter->pi_list_entry, &current->pi_waiters);
476
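	/*
	 * Setting the owner to NULL while the "has waiters" bit stays set
	 * puts the lock into the "free, but the top waiter is going to take
	 * it" transitional state described in the table at the top of this
	 * file.
	 */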
477         rt_mutex_set_owner(lock, NULL);
478
479         raw_spin_unlock_irqrestore(&current->pi_lock, flags);
480
481         wake_up_process(waiter->task);
482 }
483
484 /*
485  * Remove a waiter from a lock and give up
486  *
487  * Must be called with lock->wait_lock held, after the caller has just
488  * failed to take the lock via try_to_take_rt_mutex().
489  */
490 static void remove_waiter(struct rt_mutex *lock,
491                           struct rt_mutex_waiter *waiter)
492 {
493         int first = (waiter == rt_mutex_top_waiter(lock));
494         struct task_struct *owner = rt_mutex_owner(lock);
495         unsigned long flags;
496         int chain_walk = 0;
497
498         raw_spin_lock_irqsave(&current->pi_lock, flags);
499         plist_del(&waiter->list_entry, &lock->wait_list);
500         current->pi_blocked_on = NULL;
501         raw_spin_unlock_irqrestore(&current->pi_lock, flags);
502
503         if (!owner)
504                 return;
505
506         if (first) {
507
508                 raw_spin_lock_irqsave(&owner->pi_lock, flags);
509
510                 plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
511
512                 if (rt_mutex_has_waiters(lock)) {
513                         struct rt_mutex_waiter *next;
514
515                         next = rt_mutex_top_waiter(lock);
516                         plist_add(&next->pi_list_entry, &owner->pi_waiters);
517                 }
518                 __rt_mutex_adjust_prio(owner);
519
520                 if (owner->pi_blocked_on)
521                         chain_walk = 1;
522
523                 raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
524         }
525
526         WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
527
528         if (!chain_walk)
529                 return;
530
531         /* gets dropped in rt_mutex_adjust_prio_chain()! */
532         get_task_struct(owner);
533
534         raw_spin_unlock(&lock->wait_lock);
535
536         rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
537
538         raw_spin_lock(&lock->wait_lock);
539 }
540
541 /*
542  * Recheck the pi chain, in case the task's priority has been changed
543  *
544  * Called from sched_setscheduler
545  */
546 void rt_mutex_adjust_pi(struct task_struct *task)
547 {
548         struct rt_mutex_waiter *waiter;
549         unsigned long flags;
550
551         raw_spin_lock_irqsave(&task->pi_lock, flags);
552
553         waiter = task->pi_blocked_on;
554         if (!waiter || waiter->list_entry.prio == task->prio) {
555                 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
556                 return;
557         }
558
559         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
560
561         /* gets dropped in rt_mutex_adjust_prio_chain()! */
562         get_task_struct(task);
563         rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
564 }
565
566 /**
567  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
568  * @lock:                the rt_mutex to take
569  * @state:               the state the task should block in (TASK_INTERRUPTIBLE
570  *                       or TASK_UNINTERRUPTIBLE)
571  * @timeout:             the pre-initialized and started timer, or NULL for none
572  * @waiter:              the pre-initialized rt_mutex_waiter
573  *
574  * lock->wait_lock must be held by the caller.
575  */
576 static int __sched
577 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
578                     struct hrtimer_sleeper *timeout,
579                     struct rt_mutex_waiter *waiter)
580 {
581         int ret = 0;
582         int was_disabled;
583
584         for (;;) {
585                 /* Try to acquire the lock: */
586                 if (try_to_take_rt_mutex(lock, current, waiter))
587                         break;
588
589                 /*
590                  * TASK_INTERRUPTIBLE checks for signals and
591                  * timeout. Ignored otherwise.
592                  */
593                 if (unlikely(state == TASK_INTERRUPTIBLE)) {
594                         /* Signal pending? */
595                         if (signal_pending(current))
596                                 ret = -EINTR;
597                         if (timeout && !timeout->task)
598                                 ret = -ETIMEDOUT;
599                         if (ret)
600                                 break;
601                 }
602
603                 raw_spin_unlock(&lock->wait_lock);
604
605                 was_disabled = irqs_disabled();
606                 if (was_disabled)
607                         local_irq_enable();
608
609                 debug_rt_mutex_print_deadlock(waiter);
610
611                 schedule_rt_mutex(lock);
612
613                 if (was_disabled)
614                         local_irq_disable();
615
616                 raw_spin_lock(&lock->wait_lock);
617                 set_current_state(state);
618         }
619
620         return ret;
621 }
622
623 /*
624  * Slow path lock function:
625  */
626 static int __sched
627 rt_mutex_slowlock(struct rt_mutex *lock, int state,
628                   struct hrtimer_sleeper *timeout,
629                   int detect_deadlock)
630 {
631         struct rt_mutex_waiter waiter;
632         int ret = 0;
633
634         debug_rt_mutex_init_waiter(&waiter);
635
636         raw_spin_lock(&lock->wait_lock);
637
638         /* Try to acquire the lock again: */
639         if (try_to_take_rt_mutex(lock, current, NULL)) {
640                 raw_spin_unlock(&lock->wait_lock);
641                 return 0;
642         }
643
644         set_current_state(state);
645
646         /* Set up the timer when timeout != NULL */
647         if (unlikely(timeout)) {
648                 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
649                 if (!hrtimer_active(&timeout->timer))
650                         timeout->task = NULL;
651         }
652
653         ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
654
655         if (likely(!ret))
656                 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
657
658         set_current_state(TASK_RUNNING);
659
660         if (unlikely(ret))
661                 remove_waiter(lock, &waiter);
662
663         /*
664          * try_to_take_rt_mutex() sets the waiter bit
665          * unconditionally. We might have to fix that up.
666          */
667         fixup_rt_mutex_waiters(lock);
668
669         raw_spin_unlock(&lock->wait_lock);
670
671         /* Remove pending timer: */
672         if (unlikely(timeout))
673                 hrtimer_cancel(&timeout->timer);
674
675         debug_rt_mutex_free_waiter(&waiter);
676
677         return ret;
678 }
679
680 /*
681  * Slow path try-lock function:
682  */
683 static inline int
684 rt_mutex_slowtrylock(struct rt_mutex *lock)
685 {
686         int ret = 0;
687
688         raw_spin_lock(&lock->wait_lock);
689
690         if (likely(rt_mutex_owner(lock) != current)) {
691
692                 ret = try_to_take_rt_mutex(lock, current, NULL);
693                 /*
694                  * try_to_take_rt_mutex() sets the lock waiters
695                  * bit unconditionally. Clean this up.
696                  */
697                 fixup_rt_mutex_waiters(lock);
698         }
699
700         raw_spin_unlock(&lock->wait_lock);
701
702         return ret;
703 }
704
705 /*
706  * Slow path to release a rt-mutex:
707  */
708 static void __sched
709 rt_mutex_slowunlock(struct rt_mutex *lock)
710 {
711         raw_spin_lock(&lock->wait_lock);
712
713         debug_rt_mutex_unlock(lock);
714
715         rt_mutex_deadlock_account_unlock(current);
716
717         if (!rt_mutex_has_waiters(lock)) {
718                 lock->owner = NULL;
719                 raw_spin_unlock(&lock->wait_lock);
720                 return;
721         }
722
723         wakeup_next_waiter(lock);
724
725         raw_spin_unlock(&lock->wait_lock);
726
727         /* Undo pi boosting if necessary: */
728         rt_mutex_adjust_prio(current);
729 }
730
731 /*
732  * Debug aware fast / slowpath lock, trylock and unlock
733  *
734  * The atomic acquire/release ops are compiled away, when either the
735  * architecture does not support cmpxchg or when debugging is enabled.
736  */
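/*
 * Illustrative sketch (matching the helpers below): the lockless fast path
 * boils down to
 *
 *	lock:   rt_mutex_cmpxchg(lock, NULL, current)
 *	unlock: rt_mutex_cmpxchg(lock, current, NULL)
 *
 * with a fall back to the slow functions whenever the cmpxchg fails or is
 * not available.
 */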
737 static inline int
738 rt_mutex_fastlock(struct rt_mutex *lock, int state,
739                   int detect_deadlock,
740                   int (*slowfn)(struct rt_mutex *lock, int state,
741                                 struct hrtimer_sleeper *timeout,
742                                 int detect_deadlock))
743 {
744         if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
745                 rt_mutex_deadlock_account_lock(lock, current);
746                 return 0;
747         } else
748                 return slowfn(lock, state, NULL, detect_deadlock);
749 }
750
751 static inline int
752 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
753                         struct hrtimer_sleeper *timeout, int detect_deadlock,
754                         int (*slowfn)(struct rt_mutex *lock, int state,
755                                       struct hrtimer_sleeper *timeout,
756                                       int detect_deadlock))
757 {
758         if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
759                 rt_mutex_deadlock_account_lock(lock, current);
760                 return 0;
761         } else
762                 return slowfn(lock, state, timeout, detect_deadlock);
763 }
764
765 static inline int
766 rt_mutex_fasttrylock(struct rt_mutex *lock,
767                      int (*slowfn)(struct rt_mutex *lock))
768 {
769         if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
770                 rt_mutex_deadlock_account_lock(lock, current);
771                 return 1;
772         }
773         return slowfn(lock);
774 }
775
776 static inline void
777 rt_mutex_fastunlock(struct rt_mutex *lock,
778                     void (*slowfn)(struct rt_mutex *lock))
779 {
780         if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
781                 rt_mutex_deadlock_account_unlock(current);
782         else
783                 slowfn(lock);
784 }
785
786 /**
787  * rt_mutex_lock - lock a rt_mutex
788  *
789  * @lock: the rt_mutex to be locked
790  */
791 void __sched rt_mutex_lock(struct rt_mutex *lock)
792 {
793         might_sleep();
794
795         rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
796 }
797 EXPORT_SYMBOL_GPL(rt_mutex_lock);
798
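/*
 * Minimal usage sketch (assumes the DEFINE_RT_MUTEX()/rt_mutex_init()
 * helpers from include/linux/rtmutex.h):
 *
 *	static DEFINE_RT_MUTEX(my_lock);
 *
 *	rt_mutex_lock(&my_lock);
 *	... critical section, may sleep ...
 *	rt_mutex_unlock(&my_lock);
 */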
799 /**
800  * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
801  *
802  * @lock:               the rt_mutex to be locked
803  * @detect_deadlock:    deadlock detection on/off
804  *
805  * Returns:
806  *  0           on success
807  * -EINTR       when interrupted by a signal
808  * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
809  */
810 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
811                                                  int detect_deadlock)
812 {
813         might_sleep();
814
815         return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
816                                  detect_deadlock, rt_mutex_slowlock);
817 }
818 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
819
820 /**
821  * rt_mutex_timed_lock - lock a rt_mutex interruptibly;
822  *                      the timeout structure is provided
823  *                      by the caller
824  *
825  * @lock:               the rt_mutex to be locked
826  * @timeout:            timeout structure or NULL (no timeout)
827  * @detect_deadlock:    deadlock detection on/off
828  *
829  * Returns:
830  *  0           on success
831  * -EINTR       when interrupted by a signal
832  * -ETIMEDOUT   when the timeout expired
833  * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
834  */
835 int
836 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
837                     int detect_deadlock)
838 {
839         might_sleep();
840
841         return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
842                                        detect_deadlock, rt_mutex_slowlock);
843 }
844 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
845
846 /**
847  * rt_mutex_trylock - try to lock a rt_mutex
848  *
849  * @lock:       the rt_mutex to be locked
850  *
851  * Returns 1 on success and 0 on contention
852  */
853 int __sched rt_mutex_trylock(struct rt_mutex *lock)
854 {
855         return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
856 }
857 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
858
859 /**
860  * rt_mutex_unlock - unlock a rt_mutex
861  *
862  * @lock: the rt_mutex to be unlocked
863  */
864 void __sched rt_mutex_unlock(struct rt_mutex *lock)
865 {
866         rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
867 }
868 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
869
870 /**
871  * rt_mutex_destroy - mark a mutex unusable
872  * @lock: the mutex to be destroyed
873  *
874  * This function marks the mutex uninitialized, and any subsequent
875  * use of the mutex is forbidden. The mutex must not be locked when
876  * this function is called.
877  */
878 void rt_mutex_destroy(struct rt_mutex *lock)
879 {
880         WARN_ON(rt_mutex_is_locked(lock));
881 #ifdef CONFIG_DEBUG_RT_MUTEXES
882         lock->magic = NULL;
883 #endif
884 }
885
886 EXPORT_SYMBOL_GPL(rt_mutex_destroy);
887
888 /**
889  * __rt_mutex_init - initialize the rt lock
890  *
891  * @lock: the rt lock to be initialized
892  *
893  * Initialize the rt lock to unlocked state.
894  *
895  * Initializing a locked rt lock is not allowed
896  */
897 void __rt_mutex_init(struct rt_mutex *lock, const char *name)
898 {
899         lock->owner = NULL;
900         raw_spin_lock_init(&lock->wait_lock);
901         plist_head_init(&lock->wait_list);
902
903         debug_rt_mutex_init(lock, name);
904 }
905 EXPORT_SYMBOL_GPL(__rt_mutex_init);
906
907 /**
908  * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
909  *                              proxy owner
910  *
911  * @lock:       the rt_mutex to be locked
912  * @proxy_owner: the task to set as owner
913  *
914  * No locking. The caller has to serialize access itself.
915  * Special API call for PI-futex support
916  */
917 void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
918                                 struct task_struct *proxy_owner)
919 {
920         __rt_mutex_init(lock, NULL);
921         debug_rt_mutex_proxy_lock(lock, proxy_owner);
922         rt_mutex_set_owner(lock, proxy_owner);
923         rt_mutex_deadlock_account_lock(lock, proxy_owner);
924 }
925
926 /**
927  * rt_mutex_proxy_unlock - release a lock on behalf of owner
928  *
929  * @lock:       the rt_mutex to be released
930  *
931  * No locking. The caller has to serialize access itself.
932  * Special API call for PI-futex support
933  */
934 void rt_mutex_proxy_unlock(struct rt_mutex *lock,
935                            struct task_struct *proxy_owner)
936 {
937         debug_rt_mutex_proxy_unlock(lock);
938         rt_mutex_set_owner(lock, NULL);
939         rt_mutex_deadlock_account_unlock(proxy_owner);
940 }
941
942 /**
943  * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
944  * @lock:               the rt_mutex to take
945  * @waiter:             the pre-initialized rt_mutex_waiter
946  * @task:               the task to prepare
947  * @detect_deadlock:    perform deadlock detection (1) or not (0)
948  *
949  * Returns:
950  *  0 - task blocked on lock
951  *  1 - acquired the lock for task, caller should wake it up
952  * <0 - error
953  *
954  * Special API call for FUTEX_REQUEUE_PI support.
955  */
956 int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
957                               struct rt_mutex_waiter *waiter,
958                               struct task_struct *task, int detect_deadlock)
959 {
960         int ret;
961
962         raw_spin_lock(&lock->wait_lock);
963
964         if (try_to_take_rt_mutex(lock, task, NULL)) {
965                 raw_spin_unlock(&lock->wait_lock);
966                 return 1;
967         }
968
969         ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
970
971         if (ret && !rt_mutex_owner(lock)) {
972                 /*
973                  * Reset the return value. We might have
974                  * returned with -EDEADLK and the owner
975                  * released the lock while we were walking the
976                  * pi chain.  Let the waiter sort it out.
977                  */
978                 ret = 0;
979         }
980
981         if (unlikely(ret))
982                 remove_waiter(lock, waiter);
983
984         raw_spin_unlock(&lock->wait_lock);
985
986         debug_rt_mutex_print_deadlock(waiter);
987
988         return ret;
989 }
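/*
 * Note: for FUTEX_REQUEUE_PI the requeueing task calls the function above on
 * behalf of the waiter; the waiter itself later completes (or gives up) the
 * acquisition via rt_mutex_finish_proxy_lock() below.
 */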
990
991 /**
992  * rt_mutex_next_owner - return the next owner of the lock
993  *
994  * @lock: the rt lock to query
995  *
996  * Returns the next owner of the lock or NULL
997  *
998  * Caller has to serialize against other accessors to the lock
999  * itself.
1000  *
1001  * Special API call for PI-futex support
1002  */
1003 struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
1004 {
1005         if (!rt_mutex_has_waiters(lock))
1006                 return NULL;
1007
1008         return rt_mutex_top_waiter(lock)->task;
1009 }
1010
1011 /**
1012  * rt_mutex_finish_proxy_lock() - Complete lock acquisition
1013  * @lock:               the rt_mutex we were woken on
1014  * @to:                 the timeout, NULL if none. The hrtimer should
1015  *                      already have been started.
1016  * @waiter:             the pre-initialized rt_mutex_waiter
1017  * @detect_deadlock:    perform deadlock detection (1) or not (0)
1018  *
1019  * Complete the lock acquisition started on our behalf by another thread.
1020  *
1021  * Returns:
1022  *  0 - success
1023  * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
1024  *
1025  * Special API call for PI-futex requeue support
1026  */
1027 int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
1028                                struct hrtimer_sleeper *to,
1029                                struct rt_mutex_waiter *waiter,
1030                                int detect_deadlock)
1031 {
1032         int ret;
1033
1034         raw_spin_lock(&lock->wait_lock);
1035
1036         set_current_state(TASK_INTERRUPTIBLE);
1037
1038         ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
1039
1040         set_current_state(TASK_RUNNING);
1041
1042         if (unlikely(ret))
1043                 remove_waiter(lock, waiter);
1044
1045         /*
1046          * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
1047          * have to fix that up.
1048          */
1049         fixup_rt_mutex_waiters(lock);
1050
1051         raw_spin_unlock(&lock->wait_lock);
1052
1053         return ret;
1054 }