* with the lock held can cause deadlocks; see schedule() for
* details.)
*/
-static void finish_task_switch(struct rq *rq, struct task_struct *prev)
+static int finish_task_switch(struct rq *rq, struct task_struct *prev)
__releases(rq->lock)
{
struct mm_struct *mm = rq->prev_mm;
long prev_state;
-#ifdef CONFIG_SMP
int post_schedule = 0;
+#ifdef CONFIG_SMP
if (current->sched_class->needs_post_schedule)
post_schedule = current->sched_class->needs_post_schedule(rq);
#endif
finish_arch_switch(prev);
perf_counter_task_sched_in(current, cpu_of(rq));
finish_lock_switch(rq, prev);
-#ifdef CONFIG_SMP
- if (post_schedule)
- current->sched_class->post_schedule(rq);
-#endif
fire_sched_in_preempt_notifiers(current);
	if (mm)
		mmdrop(mm);
	if (unlikely(prev_state == TASK_DEAD)) {
		kprobe_flush_task(prev);
		put_task_struct(prev);
	}
+
+ return post_schedule;
}
/**
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 */
asmlinkage void schedule_tail(struct task_struct *prev)
	__releases(rq->lock)
{
struct rq *rq = this_rq();
+ int post_schedule;
+
+ post_schedule = finish_task_switch(rq, prev);
+
+#ifdef CONFIG_SMP
+ if (post_schedule)
+ current->sched_class->post_schedule(rq);
+#endif
- finish_task_switch(rq, prev);
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
/* In this case, finish_task_switch does not reenable preemption */
preempt_enable();
* context_switch - switch to the new MM and the new
* thread's register state.
*/
-static inline void
+static inline int
context_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
* CPUs since it called schedule(), thus the 'rq' on its stack
* frame will be invalid.
*/
- finish_task_switch(this_rq(), prev);
+ return finish_task_switch(this_rq(), prev);
}
/*
 * schedule() is the main scheduler function.
 */
asmlinkage void __sched schedule(void)
{
struct task_struct *prev, *next;
unsigned long *switch_count;
+ int post_schedule = 0;
struct rq *rq;
int cpu;
rq->curr = next;
++*switch_count;
- context_switch(rq, prev, next); /* unlocks the rq */
+ post_schedule = context_switch(rq, prev, next); /* unlocks the rq */
/*
* the context switch might have flipped the stack from under
* us, hence refresh the local variables.
*/
cpu = smp_processor_id();
rq = cpu_rq(cpu);
- } else
+ } else {
+#ifdef CONFIG_SMP
+ if (current->sched_class->needs_post_schedule)
+ post_schedule = current->sched_class->needs_post_schedule(rq);
+#endif
spin_unlock_irq(&rq->lock);
+ }
+
+#ifdef CONFIG_SMP
+ if (post_schedule)
+ current->sched_class->post_schedule(rq);
+#endif
if (unlikely(reacquire_kernel_lock(current) < 0))
goto need_resched_nonpreemptible;
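
For reference, a minimal compilable C sketch of the calling convention the hunks above introduce; the *_sketch names and the simplified struct definitions are illustrative stand-ins, not kernel symbols. The idea is that finish_task_switch() samples ->needs_post_schedule() while rq->lock is still held and returns the result, and the caller runs ->post_schedule() only once the lock has been dropped:

/*
 * Sketch only: simplified stand-in types, not the real kernel structures.
 */
struct rq { int cpu; };

struct sched_class {
	int  (*needs_post_schedule)(struct rq *rq);	/* optional hook */
	void (*post_schedule)(struct rq *rq);		/* optional hook */
};

/* Returns non-zero when the caller must invoke ->post_schedule() later. */
static int finish_task_switch_sketch(struct rq *rq, const struct sched_class *class)
{
	int post_schedule = 0;

	if (class->needs_post_schedule)
		post_schedule = class->needs_post_schedule(rq);

	/* ... finish the switch and release rq->lock ... */

	return post_schedule;
}

static void schedule_tail_sketch(struct rq *rq, const struct sched_class *class)
{
	int post_schedule = finish_task_switch_sketch(rq, class);

	/* rq->lock is no longer held here, so the balancing pass is safe. */
	if (post_schedule)
		class->post_schedule(rq);
}

In the schedule() hunk the same pattern appears with one extra step: cpu and rq are reloaded after context_switch(), since, as the in-line comment notes, the context switch might have flipped the stack, so the hook is run against the refreshed rq rather than from inside finish_task_switch().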