The new rcu_read_lock_nesting and rcu_read_unlock_special per-CPU
variables must be saved and restored at every context switch, including
those involving schedule_tail(). This commit therefore adds the saving
and restoring to schedule_tail().
Reported-by: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Sasha Levin <levinsasha928@gmail.com>
struct task_struct *from = current, *to = arg;
to->thread.saved_task = from;
- rcu_switch_from();
+ rcu_switch_from(from);
switch_to(from, to, from);
rcu_switch_to();
}
*
* The caller must have disabled preemption.
*/
-static inline void rcu_switch_from(void)
+static inline void rcu_switch_from(struct task_struct *t)
{
- struct task_struct *t = current;
-
if (__this_cpu_read(rcu_read_lock_nesting) != 0)
rcu_preempt_note_context_switch();
t->rcu_read_lock_nesting_save = __this_cpu_read(rcu_read_lock_nesting);
{
}
-static inline void rcu_switch_from(void)
+static inline void rcu_switch_from(struct task_struct *t)
{
}
{
struct rq *rq = this_rq();
+ rcu_switch_from(prev);
+ rcu_switch_to();
finish_task_switch(rq, prev);
/*
#endif
/* Here we just switch the register state and the stack. */
- rcu_switch_from();
+ rcu_switch_from(current);
switch_to(prev, next, prev);
rcu_switch_to();