raw_spin_unlock_irqrestore(&rnp->lock, flags);
if (need_report & RCU_OFL_TASKS_EXP_GP)
rcu_report_exp_rnp(rsp, rnp);
-
- /*
- * If there are no more online CPUs for this rcu_node structure,
- * kill the rcu_node structure's kthread. Otherwise, adjust its
- * affinity.
- */
- t = rnp->node_kthread_task;
- if (t != NULL &&
- rnp->qsmaskinit == 0) {
- raw_spin_lock_irqsave(&rnp->lock, flags);
- rnp->node_kthread_task = NULL;
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- kthread_stop(t);
- rcu_stop_boost_kthread(rnp);
- } else
- rcu_node_kthread_setaffinity(rnp, -1);
+ rcu_node_kthread_setaffinity(rnp, -1);
}
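
With the per-rcu_node kthreads made immortal, the CPU-offline path reduces to re-targeting the surviving kthread instead of stopping it and NULLing the task pointer. A minimal userspace sketch of that "never stop, just re-affine" rule, using pthreads; retarget_worker() and the -1 "exclude no CPU" convention mirror the rcu_node_kthread_setaffinity(rnp, -1) call above but are illustrative only, not part of the patch (build with cc -pthread):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

/* Re-target a long-lived worker instead of stopping it; passing -1,
 * as the patch does, means "exclude no CPU". Illustrative analog of
 * rcu_node_kthread_setaffinity(), not kernel code. */
static int retarget_worker(pthread_t worker, int exclude_cpu)
{
	cpu_set_t cs;
	int cpu;

	CPU_ZERO(&cs);
	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
		if (cpu != exclude_cpu)
			CPU_SET(cpu, &cs);
	return pthread_setaffinity_np(worker, sizeof(cs), &cs);
}

int main(void)
{
	return retarget_worker(pthread_self(), -1);
}
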
/*
return;
}
if (rnp->qsmask == 0) {
- rcu_initiate_boost(rnp);
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
continue;
}
cpu = rnp->grplo;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
rnp = rcu_get_root(rsp);
- raw_spin_lock_irqsave(&rnp->lock, flags);
- if (rnp->qsmask == 0)
- rcu_initiate_boost(rnp);
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ if (rnp->qsmask == 0) {
+ raw_spin_lock_irqsave(&rnp->lock, flags);
+ rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+ }
}
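
Note the shape of the reworked root-node check: rnp->qsmask is peeked without the lock, and only if it looks idle is the lock taken and rcu_initiate_boost() called, which then releases it. As I read the change, the unlocked peek is safe because rcu_initiate_boost() revalidates its conditions after the lock is held, so the peek is a heuristic fast path, not a correctness requirement. A runnable sketch of that check-then-lock-then-recheck idiom; all names here are hypothetical stand-ins:

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_ulong qsmask = 1;

/* Called with `lock` held; rechecks under the lock and releases it
 * on every path, matching the new rcu_initiate_boost() convention. */
static void initiate_locked(void)
{
	if (atomic_load(&qsmask) == 0) {
		/* ... act: state has been validated under the lock ... */
	}
	pthread_mutex_unlock(&lock);
}

static void maybe_initiate(void)
{
	if (atomic_load(&qsmask) != 0)	/* racy peek: only an optimization */
		return;
	pthread_mutex_lock(&lock);
	initiate_locked();		/* releases `lock` */
}

int main(void)
{
	maybe_initiate();		/* fast path: nothing to do */
	atomic_store(&qsmask, 0);
	maybe_initiate();		/* slow path: lock, recheck, act */
	return 0;
}
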
/*
/*
* Wake up the specified per-rcu_node-structure kthread.
- * The caller must hold ->lock.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
*/
static void invoke_rcu_node_kthread(struct rcu_node *rnp)
{
raw_spin_lock_irqsave(&rnp->lock, flags);
rnp->wakemask |= rdp->grpmask;
- invoke_rcu_node_kthread(rnp);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ invoke_rcu_node_kthread(rnp);
}
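
Moving invoke_rcu_node_kthread() below the unlock means the wakeup no longer runs with rnp->lock held, presumably so that no scheduler-side locks are ever nested inside rnp->lock. The publish-under-lock, signal-after-unlock pattern stays correct as long as the waiter rechecks its predicate under the lock. A self-contained pthread analog of this hunk; node_worker() and kick() are illustrative names, not from the patch:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned long wakemask;

static void *node_worker(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&lock);
	while (wakemask == 0)			 /* predicate recheck makes */
		pthread_cond_wait(&cond, &lock); /* a late signal harmless */
	wakemask = 0;
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void kick(unsigned long grpmask)
{
	pthread_mutex_lock(&lock);
	wakemask |= grpmask;		/* publish state under the lock... */
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&cond);	/* ...then wake with no lock held */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, node_worker, NULL);
	kick(0x1);
	pthread_join(t, NULL);
	return 0;
}
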
/*
for (;;) {
rnp->node_kthread_status = RCU_KTHREAD_WAITING;
- wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0 ||
- kthread_should_stop());
- if (kthread_should_stop())
- break;
+ wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0);
rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
raw_spin_lock_irqsave(&rnp->lock, flags);
mask = rnp->wakemask;
rnp->wakemask = 0;
- rcu_initiate_boost(rnp);
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
if ((mask & 0x1) == 0)
continue;
preempt_enable();
}
}
+ /* NOTREACHED */
rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
return 0;
}
int cpu;
unsigned long mask = rnp->qsmaskinit;
- if (rnp->node_kthread_task == NULL || mask == 0)
+ if (rnp->node_kthread_task == NULL)
return;
if (!alloc_cpumask_var(&cm, GFP_KERNEL))
return;
static void __init __rcu_init_preempt(void);
static void rcu_needs_cpu_flush(void);
static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp);
-static void rcu_initiate_boost(struct rcu_node *rnp);
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
cpumask_var_t cm);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp,
int rnp_index);
-#ifdef CONFIG_HOTPLUG_CPU
-static void rcu_stop_boost_kthread(struct rcu_node *rnp);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
#endif /* #ifndef RCU_TREE_NONCORE */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
+ unsigned long flags;
int must_wait = 0;
- raw_spin_lock(&rnp->lock); /* irqs already disabled */
- if (!list_empty(&rnp->blkd_tasks)) {
+ raw_spin_lock_irqsave(&rnp->lock, flags);
+ if (list_empty(&rnp->blkd_tasks))
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ else {
rnp->exp_tasks = rnp->blkd_tasks.next;
- rcu_initiate_boost(rnp);
+ rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
must_wait = 1;
}
- raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
if (!must_wait)
rcu_report_exp_rnp(rsp, rnp);
}
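
sync_rcu_preempt_exp_init() used to take rnp->lock with plain raw_spin_lock() because irqs were already disabled; now that rcu_initiate_boost() ends in raw_spin_unlock_irqrestore(), the caller must capture flags itself so the callee can restore the right state. A userspace analog of that save/handoff/restore discipline, using the signal mask in place of the irq state; names and the analogy itself are illustrative only:

#include <signal.h>
#include <stdio.h>

/* The helper that ends the critical section must be handed the saved
 * state so it can restore it -- the analog of passing `flags` down to
 * rcu_initiate_boost(). */
static void helper_releases(sigset_t *saved)
{
	/* ... critical-section work ... */
	sigprocmask(SIG_SETMASK, saved, NULL);	/* like _irqrestore(flags) */
}

int main(void)
{
	sigset_t all, saved;

	sigfillset(&all);
	sigprocmask(SIG_BLOCK, &all, &saved);	/* like _irqsave(flags) */
	helper_releases(&saved);
	puts("blocked-signal state restored");
	return 0;
}
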
*/
static void rcu_boost_kthread_timer(unsigned long arg)
{
- unsigned long flags;
- struct rcu_node *rnp = (struct rcu_node *)arg;
-
- raw_spin_lock_irqsave(&rnp->lock, flags);
- invoke_rcu_node_kthread(rnp);
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ invoke_rcu_node_kthread((struct rcu_node *)arg);
}
/*
for (;;) {
rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
wait_event_interruptible(rnp->boost_wq, rnp->boost_tasks ||
- rnp->exp_tasks ||
- kthread_should_stop());
- if (kthread_should_stop())
- break;
+ rnp->exp_tasks);
rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
more2boost = rcu_boost(rnp);
if (more2boost)
spincnt = 0;
}
}
- rnp->boost_kthread_status = RCU_KTHREAD_STOPPED;
+ /* NOTREACHED */
return 0;
}
* kthread to start boosting them. If there is an expedited grace
* period in progress, it is always time to boost.
*
- * The caller must hold rnp->lock.
+ * The caller must hold rnp->lock, which this function releases,
+ * but irqs remain disabled. The ->boost_kthread_task is immortal,
+ * so we don't need to worry about it going away.
*/
-static void rcu_initiate_boost(struct rcu_node *rnp)
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
struct task_struct *t;
if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
rnp->n_balk_exp_gp_tasks++;
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
if (rnp->exp_tasks != NULL ||
    (rnp->gp_tasks != NULL &&
     rnp->boost_tasks == NULL &&
     rnp->qsmask == 0 &&
     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
if (rnp->exp_tasks == NULL)
rnp->boost_tasks = rnp->gp_tasks;
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
t = rnp->boost_kthread_task;
if (t != NULL)
wake_up_process(t);
- } else
+ } else {
rcu_initiate_boost_trace(rnp);
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ }
}
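
Every exit from the rewritten rcu_initiate_boost() now drops rnp->lock, and the wake_up_process() call happens only after the unlock. A compact, runnable check of that "all paths release" shape; pthread stand-ins with hypothetical names, not the kernel code itself (build with cc -pthread):

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int exp_tasks, gp_tasks, boost_due;

/* Called with `lock` held; all three return paths release it, and the
 * wakeup (elided) would run only after the unlock. */
static void initiate(void)
{
	if (!gp_tasks && !exp_tasks) {
		pthread_mutex_unlock(&lock);	/* balk path */
		return;
	}
	if (exp_tasks || boost_due) {
		pthread_mutex_unlock(&lock);	/* boost path: unlock first */
		/* wake_up_process() analog would go here */
		return;
	}
	pthread_mutex_unlock(&lock);		/* not-yet-time path */
}

int main(void)
{
	pthread_mutex_lock(&lock);
	gp_tasks = 1;
	boost_due = 1;
	initiate();
	assert(pthread_mutex_trylock(&lock) == 0);	/* proves release */
	pthread_mutex_unlock(&lock);
	return 0;
}
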
/*
return 0;
}
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void rcu_stop_boost_kthread(struct rcu_node *rnp)
-{
- unsigned long flags;
- struct task_struct *t;
-
- raw_spin_lock_irqsave(&rnp->lock, flags);
- t = rnp->boost_kthread_task;
- rnp->boost_kthread_task = NULL;
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- if (t != NULL)
- kthread_stop(t);
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
#else /* #ifdef CONFIG_RCU_BOOST */
-static void rcu_initiate_boost(struct rcu_node *rnp)
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
return 0;
}
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void rcu_stop_boost_kthread(struct rcu_node *rnp)
-{
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
#endif /* #else #ifdef CONFIG_RCU_BOOST */
#ifndef CONFIG_SMP