return &rsp->node[0];
}
-#ifdef CONFIG_SMP
-
/*
* If the specified CPU is offline, tell the caller that it is in
* a quiescent state. Otherwise, whack it with a reschedule IPI.
return 0;
}
-#endif /* #ifdef CONFIG_SMP */
-
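The comment above captures the rule this hunk relies on: a CPU that is offline cannot be inside an RCU read-side critical section, so it can be credited with a quiescent state immediately, while an online CPU is instead prodded (via a reschedule IPI) so that it passes through one soon. A minimal userspace sketch of that decision, not part of the patch, with purely hypothetical toy_* names standing in for the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

struct toy_cpu {
	int id;
	bool online;
};

/* Hypothetical stand-in for sending a reschedule IPI. */
static void toy_poke_cpu(struct toy_cpu *cpu)
{
	printf("poking CPU %d so it soon passes through a quiescent state\n", cpu->id);
}

/* Return 1 if the CPU can be credited with a quiescent state right away. */
static int toy_implicit_offline_qs(struct toy_cpu *cpu)
{
	if (!cpu->online)
		return 1;	/* offline CPUs cannot be in RCU read-side critical sections */
	toy_poke_cpu(cpu);
	return 0;
}

int main(void)
{
	struct toy_cpu cpus[] = { { 0, true }, { 1, false } };

	for (int i = 0; i < 2; i++)
		printf("CPU %d -> %d\n", cpus[i].id, toy_implicit_offline_qs(&cpus[i]));
	return 0;
}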
/*
* rcu_idle_enter_common - inform RCU that current CPU is moving towards idle
*
return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
}
-#ifdef CONFIG_SMP
-
/*
* Snapshot the specified CPU's dynticks counter so that we can later
* credit them with an implicit quiescent state. Return 1 if this CPU
return rcu_implicit_offline_qs(rdp);
}
-#endif /* #ifdef CONFIG_SMP */
-
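For context on the snapshotting described in the comment above: the grace-period machinery records each CPU's dynticks counter, which stays even while the CPU is in dynticks-idle and odd otherwise; if the counter is even at snapshot time, or has changed when rechecked later, the CPU has been through an extended quiescent state and can be credited with an implicit quiescent state. A simplified, non-kernel sketch of that check (the toy_* names are hypothetical, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

struct toy_dynticks {
	unsigned long dynticks;		/* even: in dynticks-idle, odd: not */
	unsigned long dynticks_snap;	/* snapshot taken when forcing quiescent states */
};

/* Take a snapshot; return true if the CPU is already idle (counter even). */
static bool toy_save_progress_counter(struct toy_dynticks *td)
{
	td->dynticks_snap = td->dynticks;
	return (td->dynticks_snap & 0x1) == 0;
}

/* Later pass: has the CPU been idle, or passed through idle, since the snapshot? */
static bool toy_implicit_dynticks_qs(const struct toy_dynticks *td)
{
	unsigned long curr = td->dynticks;

	return (curr & 0x1) == 0 || curr != td->dynticks_snap;
}

int main(void)
{
	struct toy_dynticks td = { .dynticks = 5 };	/* odd: CPU is busy */

	if (!toy_save_progress_counter(&td)) {
		td.dynticks++;				/* CPU later enters dynticks-idle */
		printf("implicit QS: %d\n", toy_implicit_dynticks_qs(&td));
	}
	return 0;
}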
static void record_gp_stall_check_time(struct rcu_state *rsp)
{
rsp->gp_start = jiffies;
trace_rcu_utilization("End scheduler-tick");
}
-#ifdef CONFIG_SMP
-
/*
* Scan the leaf rcu_node structures, processing dyntick state for any that
* have not yet encountered a quiescent state, using the function specified.
trace_rcu_utilization("End fqs");
}
-#else /* #ifdef CONFIG_SMP */
-
-static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
-{
- set_need_resched();
-}
-
-#endif /* #else #ifdef CONFIG_SMP */
-
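The leaf scan described in the comment above walks the leaf rcu_node structures and applies a caller-supplied per-CPU function to every CPU that still owes a quiescent state for the current grace period, then clears the per-node bitmask bits of the CPUs that the function credits. A compilable toy sketch of that pattern, not part of the patch, with hypothetical toy_* names and sizes:

#include <stdio.h>

#define TOY_CPUS_PER_NODE 4
#define TOY_NR_NODES 2

struct toy_rcu_node {
	unsigned long qsmask;	/* bit set: CPU still owes a quiescent state */
	int grplo;		/* first CPU covered by this leaf node */
};

/* Apply @f to every CPU in each node that still owes a quiescent state and
 * clear the bits of the CPUs that @f credits with one.
 */
static void toy_force_qs_rnp(struct toy_rcu_node *nodes, int nr_nodes,
			     int (*f)(int cpu))
{
	for (int i = 0; i < nr_nodes; i++) {
		struct toy_rcu_node *rnp = &nodes[i];
		unsigned long cleared = 0;

		for (int bit = 0; bit < TOY_CPUS_PER_NODE; bit++) {
			if ((rnp->qsmask & (1UL << bit)) && f(rnp->grplo + bit))
				cleared |= 1UL << bit;
		}
		rnp->qsmask &= ~cleared;
	}
}

/* Stand-in for the dynticks/offline checks used by the real scan. */
static int toy_cpu_is_quiescent(int cpu)
{
	return cpu % 2;	/* pretend odd-numbered CPUs are idle */
}

int main(void)
{
	struct toy_rcu_node nodes[TOY_NR_NODES] = {
		{ .qsmask = 0xf, .grplo = 0 },
		{ .qsmask = 0xf, .grplo = 4 },
	};

	toy_force_qs_rnp(nodes, TOY_NR_NODES, toy_cpu_is_quiescent);
	for (int i = 0; i < TOY_NR_NODES; i++)
		printf("node %d qsmask now %#lx\n", i, nodes[i].qsmask);
	return 0;
}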
/*
* This does the RCU core processing work for the specified rcu_state
* and rcu_data structures. This may be called only from the CPU to
#endif /* #else #ifdef CONFIG_RCU_BOOST */
-#ifndef CONFIG_SMP
-
-void synchronize_sched_expedited(void)
-{
- cond_resched();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
-#else /* #ifndef CONFIG_SMP */
-
static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-#endif /* #else #ifndef CONFIG_SMP */
-
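The sync_sched_expedited_started/sync_sched_expedited_done atomics retained above implement ticket-style batching for synchronize_sched_expedited(): a caller takes a "started" ticket, attempts the expedited work, and on contention can return early if the "done" counter has already advanced past its ticket, because some other caller's expedited grace period covered it. A simplified userspace sketch of that batching idea, not the kernel implementation (toy_try_expedite() and the other toy_* names are stand-ins):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long toy_started;
static atomic_long toy_done;

/* Stand-in for the real expedited machinery that stops the other CPUs. */
static bool toy_try_expedite(void)
{
	return true;	/* pretend the expedited work succeeds on the first try */
}

static void toy_synchronize_expedited(void)
{
	long ticket = atomic_fetch_add(&toy_started, 1) + 1;

	for (;;) {
		if (toy_try_expedite()) {
			long cur = atomic_load(&toy_done);

			/* Advance "done" to our ticket, but never move it backwards. */
			while (cur < ticket &&
			       !atomic_compare_exchange_weak(&toy_done, &cur, ticket))
				;
			break;
		}
		/* Did a concurrent caller's expedited grace period already cover us? */
		if (atomic_load(&toy_done) >= ticket)
			break;
	}
}

int main(void)
{
	toy_synchronize_expedited();
	printf("done = %ld\n", (long)atomic_load(&toy_done));
	return 0;
}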
#if !defined(CONFIG_RCU_FAST_NO_HZ)
/*