git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - kernel/sched.c
perf, trace: Inline perf_swevent_put_recursion_context()
[karo-tx-linux.git] / kernel / sched.c
index 054a6012de99b7298d5f92c4b137be17a37335f3..d3c120f1bf53631b1ef9fee2758f29979bb236f7 100644 (file)
@@ -969,14 +969,6 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        }
 }
 
-void task_rq_unlock_wait(struct task_struct *p)
-{
-       struct rq *rq = task_rq(p);
-
-       smp_mb(); /* spin-unlock-wait is not a full memory barrier */
-       raw_spin_unlock_wait(&rq->lock);
-}
-
 static void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
 {
@@ -3738,7 +3730,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched preempt_schedule(void)
+asmlinkage void __sched notrace preempt_schedule(void)
 {
        struct thread_info *ti = current_thread_info();
 
@@ -3750,9 +3742,9 @@ asmlinkage void __sched preempt_schedule(void)
                return;
 
        do {
-               add_preempt_count(PREEMPT_ACTIVE);
+               add_preempt_count_notrace(PREEMPT_ACTIVE);
                schedule();
-               sub_preempt_count(PREEMPT_ACTIVE);
+               sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
                /*
                 * Check again in case we missed a preemption opportunity
@@ -4061,6 +4053,23 @@ int __sched wait_for_completion_killable(struct completion *x)
 }
 EXPORT_SYMBOL(wait_for_completion_killable);
 
+/**
+ * wait_for_completion_killable_timeout - waits for completion of a task (w/(to,killable))
+ * @x:  holds the state of this particular completion
+ * @timeout:  timeout value in jiffies
+ *
+ * This waits for either a completion of a specific task to be
+ * signaled or for a specified timeout to expire. It can be
+ * interrupted by a kill signal. The timeout is in jiffies.
+ */
+unsigned long __sched
+wait_for_completion_killable_timeout(struct completion *x,
+                                    unsigned long timeout)
+{
+       return wait_for_common(x, timeout, TASK_KILLABLE);
+}
+EXPORT_SYMBOL(wait_for_completion_killable_timeout);
+
 /**
  *     try_wait_for_completion - try to decrement a completion without blocking
  *     @x:     completion structure