 		return;
 	}
 	RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
-	if (current->pid != 0) {
+	if (!is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
 		return;
 	}
 	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
-	if (current->pid != 0) {
+	if (!is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
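The two RCU_TRACE()-wrapped hunks above (TINY_RCU's rcutiny.c, judging by the RCU_TRACE() wrapper and the rcu_dynticks_nesting global) make the same substitution: the open-coded "PID 0 means idle" test becomes a call to the is_idle_task() helper. For reference, a minimal sketch of the helper as it appeared in <linux/sched.h> when introduced (quoted from memory, so treat the kerneldoc wording as approximate):

	/**
	 * is_idle_task - is the specified task an idle task?
	 * @p: the task in question.
	 *
	 * All idle threads carry PID 0, so the check reduces to a PID
	 * test; centralizing it behind a helper lets the definition of
	 * "idle task" change later without touching every caller.
	 */
	static inline bool is_idle_task(struct task_struct *p)
	{
		return p->pid == 0;
	}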
 		return;
 	}
 	trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting);
-	if (current->pid != 0) {
+	if (!is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		trace_rcu_dyntick("Error on entry: not idle task",
 	smp_mb__after_atomic_inc();  /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
-	if (current->pid != 0) {
+	if (!is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		trace_rcu_dyntick("Error on exit: not idle task",