/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/oom.h>
#include <linux/smpboot.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
	printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
#endif
	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
		printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state =
	RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in an RCU read-side critical section.  There might be any number
 * of tasks blocked while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * online!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_qs(smp_processor_id());

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task("rcu_preempt",
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report("preempt_rcu",
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	printk(KERN_CONT "\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		printk(KERN_CONT " P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns non-zero if there was previously a task blocking the current
 * grace period on the specified rcu_node structure, with the
 * RCU_OFL_TASKS_*_GP bits indicating whether the normal and/or expedited
 * grace periods were blocked.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
	}

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
#ifdef CONFIG_RCU_BOOST
	rnp->boost_tasks = NULL;
	/*
	 * In case the root is being boosted but the leaf was not, make
	 * sure that we boost the tasks blocking the current grace period
	 * in this case.
	 */
	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks &&
	    rnp_root->boost_tasks != rnp_root->exp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	return retval;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);
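
/*
 * Illustrative sketch, not part of this file: a typical call_rcu()
 * user embeds an rcu_head in its own structure and frees the enclosing
 * structure from the callback via container_of().  The struct foo and
 * foo_reclaim() names below are hypothetical:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		struct foo *fp = container_of(rcu, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 * Then, once fp has been unlinked from all RCU-protected structures:
 *
 *	call_rcu(&fp->rcu, foo_reclaim);
 */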

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
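
/*
 * Illustrative note, not part of this file: callers normally reach
 * kfree_call_rcu() through the kfree_rcu() macro, which records the
 * offset of the rcu_head within the enclosing structure so that no
 * separate callback function is needed.  Reusing the hypothetical
 * struct foo above:
 *
 *	kfree_rcu(fp, rcu);
 *
 * is the lazy equivalent of call_rcu() with a callback that just
 * invokes kfree(fp).
 */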

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 *
 * See the description of synchronize_sched() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
		return;
	if (rcu_expedited)
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
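
/*
 * Illustrative sketch, not part of this file, of the classic update
 * pattern that synchronize_rcu() supports.  The global pointer gp and
 * struct foo are hypothetical; readers traverse gp under
 * rcu_read_lock()/rcu_read_unlock() using rcu_dereference():
 *
 *	struct foo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
 *	struct foo *oldp = gp;
 *
 *	*newp = *oldp;
 *	newp->data = 42;
 *	rcu_assign_pointer(gp, newp);
 *	synchronize_rcu();
 *	kfree(oldp);
 *
 * Because synchronize_rcu() waits for all pre-existing readers, no
 * reader can still hold a reference to oldp when it is freed, while
 * concurrent readers see either the old or the new version of *gp.
 */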

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake)
				wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
 * CPU hotplug operations.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);	/* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for these lists to drain.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier.  Failing to observe
 * these restrictions will result in deadlock.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	unsigned long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Block CPU-hotplug operations.  This means that any CPU-hotplug
	 * operation that finds an rcu_node structure with tasks in the
	 * process of being boosted will know that all tasks blocking
	 * this expedited grace period will already be in the process of
	 * being boosted.  This simplifies the process of moving tasks
	 * from leaf to root rcu_node structures.
	 */
	get_online_cpus();

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (ULONG_CMP_LT(snap,
		    ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
			put_online_cpus();
			goto mb_ret; /* Others did our work for us. */
		}
		if (trycount++ < 10) {
			udelay(trycount * num_online_cpus());
		} else {
			put_online_cpus();
			wait_rcu_gp(call_rcu);
			return;
		}
	}
	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
		put_online_cpus();
		goto unlock_mb_ret; /* Others did our work for us. */
	}

	/* Force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	put_online_cpus();

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
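
/*
 * Illustrative note, not part of this file: per the batching advice
 * above, a hypothetical update loop such as
 *
 *	for (i = 0; i < n; i++) {
 *		remove_item(i);
 *		synchronize_rcu_expedited();
 *		free_item(i);
 *	}
 *
 * is better restructured to remove all n items first and then wait
 * for a single grace period:
 *
 *	for (i = 0; i < n; i++)
 *		remove_item(i);
 *	synchronize_rcu();
 *	for (i = 0; i < n; i++)
 *		free_item(i);
 */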

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 *
 * Note that this primitive will not always wait for an RCU grace period
 * to complete.  For example, if there are no RCU callbacks queued anywhere
 * in the system, then rcu_barrier() is within its rights to return
 * immediately, without waiting for anything, much less an RCU grace period.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
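
/*
 * Illustrative note, not part of this file: the canonical rcu_barrier()
 * user is module-unload code.  A module that posts callbacks with
 * call_rcu() must not be unloaded until those callbacks have run, lest
 * they execute after the module's text is gone.  A hypothetical exit
 * handler would therefore first prevent new callbacks from being
 * posted, then do:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_stop_posting_callbacks();
 *		rcu_barrier();
 *	}
 */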

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 *
 * Because there is no preemptible RCU, we use RCU-sched instead.
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

static void rcu_wake_cond(struct task_struct *t, int status)
{
	/*
	 * If the thread is yielding, only wake it when this
	 * is invoked from idle.
	 */
	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
		wake_up_process(t);
}

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization("Start boost kthread@init");
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End boost kthread@rcu_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization("End boost kthread@rcu_yield");
			schedule_timeout_interruptible(2);
			trace_rcu_utilization("Start boost kthread@rcu_yield");
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization("End boost kthread@notreached");
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t)
			rcu_wake_cond(t, rnp->boost_kthread_status);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
			      __this_cpu_read(rcu_cpu_kthread_status));
	}
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
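
/*
 * Worked example, not from this file: assuming the Kconfig default of
 * CONFIG_RCU_BOOST_DELAY=500 milliseconds and HZ=250,
 * RCU_BOOST_DELAY_JIFFIES = DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies,
 * so boosting is not initiated until the grace period is at least
 * half a second old.
 */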

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp)
{
	int rnp_index = rnp - &rsp->node[0];
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;

	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
		return 0;

	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
	rcu_preempt_do_callbacks();
}

static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct sched_param sp;

	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
}

static void rcu_cpu_kthread_park(unsigned int cpu)
{
	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}

static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
	return __get_cpu_var(rcu_cpu_has_work);
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static void rcu_cpu_kthread(unsigned int cpu)
{
	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
	int spincnt;

	for (spincnt = 0; spincnt < 10; spincnt++) {
		trace_rcu_utilization("Start CPU kthread@rcu_wait");
		local_bh_disable();
		*statusp = RCU_KTHREAD_RUNNING;
		this_cpu_inc(rcu_cpu_kthread_loops);
		local_irq_disable();
		work = *workp;
		*workp = 0;
		local_irq_enable();
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp == 0) {
			trace_rcu_utilization("End CPU kthread@rcu_wait");
			*statusp = RCU_KTHREAD_WAITING;
			return;
		}
	}
	*statusp = RCU_KTHREAD_YIELDING;
	trace_rcu_utilization("Start CPU kthread@rcu_yield");
	schedule_timeout_interruptible(2);
	trace_rcu_utilization("End CPU kthread@rcu_yield");
	*statusp = RCU_KTHREAD_WAITING;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	struct task_struct *t = rnp->boost_kthread_task;
	unsigned long mask = rnp->qsmaskinit;
	cpumask_var_t cm;
	int cpu;

	if (!t)
		return;
	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(t, cm);
	free_cpumask_var(cm);
}

static struct smp_hotplug_thread rcu_cpu_thread_spec = {
	.store			= &rcu_cpu_kthread_task,
	.thread_should_run	= rcu_cpu_kthread_should_run,
	.thread_fn		= rcu_cpu_kthread,
	.thread_comm		= "rcuc/%u",
	.setup			= rcu_cpu_kthread_setup,
	.park			= rcu_cpu_kthread_park,
};

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	struct rcu_node *rnp;
	int cpu;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu)
		per_cpu(rcu_cpu_has_work, cpu) = 0;
	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active)
		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
 * any flavor of RCU.
 */
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	*delta_jiffies = ULONG_MAX;
	return rcu_cpu_has_callbacks(cpu);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

/*
 * Don't bother keeping a running count of the number of RCU callbacks
 * posted because CONFIG_RCU_FAST_NO_HZ=n.
 */
static void rcu_idle_count_callbacks_posted(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following preprocessor symbols control this state machine:
 *
 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
 *	to satisfy RCU.  Beyond this point, it is better to incur a periodic
 *	scheduling-clock interrupt than to loop through the state machine
 *	at full power.
 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
 *	optional if RCU does not need anything immediately from this
 *	CPU, even if this CPU still has RCU callbacks queued.  The first
 *	times through the state machine are mandatory: we need to give
 *	the state machine a chance to communicate a quiescent state
 *	to the RCU core.
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *	permitted to sleep in dyntick-idle mode with only lazy RCU
 *	callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
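
/*
 * Worked example, not from this file: with HZ=1000, RCU_IDLE_GP_DELAY
 * permits a dyntick-idle sleep of roughly 4 jiffies = 4 milliseconds
 * when non-lazy callbacks are pending, while RCU_IDLE_LAZY_GP_DELAY =
 * 6 * 1000 = 6000 jiffies, i.e. six seconds, when only lazy
 * (kfree_rcu()-style) callbacks are pending.
 */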

extern int tick_nohz_enabled;

/*
 * Does the specified flavor of RCU have non-lazy callbacks pending on
 * the specified CPU?  Both RCU flavor and CPU are specified by the
 * rcu_data structure.
 */
static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
{
	return rdp->qlen != rdp->qlen_lazy;
}

#ifdef CONFIG_TREE_PREEMPT_RCU

/*
 * Are there non-lazy RCU-preempt callbacks?  (There cannot be if there
 * is no RCU-preempt in the kernel.)
 */
static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	return __rcu_cpu_has_nonlazy_callbacks(rdp);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
 */
static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
{
	return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
	       __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
	       rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
}

/*
 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
 * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
 * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
 * it is better to incur scheduling-clock interrupts than to spin
 * continuously for the same time duration!
 *
 * The delta_jiffies argument is used to store the time when RCU is
 * going to need the CPU again if it still has callbacks.  The reason
 * for this is that rcu_prepare_for_idle() might need to post a timer,
 * but if so, it will do so after tick_nohz_stop_sched_tick() has set
 * the wakeup time for this CPU.  This means that RCU's timer can be
 * delayed until the wakeup time, which defeats the purpose of posting
 * a timer.
 */
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	/* Flag a new idle sojourn to the idle-entry state machine. */
	rdtp->idle_first_pass = 1;
	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu)) {
		*delta_jiffies = ULONG_MAX;
		return 0;
	}
	if (rdtp->dyntick_holdoff == jiffies) {
		/* RCU recently tried and failed, so don't try again. */
		*delta_jiffies = 1;
		return 1;
	}
	/* Set up for the possibility that RCU will post a timer. */
	if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
		*delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
					  RCU_IDLE_GP_DELAY) - jiffies;
	} else {
		*delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
		*delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
	}
	return 0;
}
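
/*
 * Worked example, not from this file: with RCU_IDLE_GP_DELAY = 4 and
 * jiffies = 1001, the non-lazy branch computes
 * round_up(1001 + 4, 4) - 1001 = 1008 - 1001 = 7, so the requested
 * wakeup always lands on a multiple of RCU_IDLE_GP_DELAY.  This
 * presumably lets the wakeups of nearby CPUs batch onto the same tick.
 */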

/*
 * Handler for smp_call_function_single().  The only point of this
 * handler is to wake the CPU up, so the handler does only tracing.
 */
void rcu_idle_demigrate(void *unused)
{
	trace_rcu_prep_idle("Demigrate");
}

/*
 * Timer handler used to force CPU to start pushing its remaining RCU
 * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
 * real work is done upon re-entry to idle, or by the next scheduling-clock
 * interrupt should idle not be re-entered.
 *
 * One special case: the timer gets migrated without awakening the CPU
 * on which the timer was scheduled.  In this case, we must wake up
 * that CPU.  We do so with smp_call_function_single().
 */
static void rcu_idle_gp_timer_func(unsigned long cpu_in)
{
	int cpu = (int)cpu_in;

	trace_rcu_prep_idle("Timer");
	if (cpu != smp_processor_id())
		smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0);
	else
		WARN_ON_ONCE(1); /* Getting here can hang the system... */
}

/*
 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	rdtp->dyntick_holdoff = jiffies - 1;
	setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
	rdtp->idle_gp_timer_expires = jiffies - 1;
	rdtp->idle_first_pass = 1;
}

/*
 * Clean up for exit from idle.  Because we are exiting from idle, there
 * is no longer any point to ->idle_gp_timer, so cancel it.  This will
 * do nothing if this timer is not active, so just cancel it unconditionally.
 */
static void rcu_cleanup_after_idle(int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	del_timer(&rdtp->idle_gp_timer);
	trace_rcu_prep_idle("Cleanup after idle");
	rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
}

/*
 * Check to see if any RCU-related work can be done by the current CPU,
 * and if so, schedule a softirq to get it done.  This function is part
 * of the RCU implementation; it is -not- an exported member of the RCU API.
 *
 * The idea is for the current CPU to clear out all work required by the
 * RCU core for the current grace period, so that this CPU can be permitted
 * to enter dyntick-idle mode.  In some cases, it will need to be awakened
 * at the end of the grace period by whatever CPU ends the grace period.
 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
 * number of wakeups by a modest integer factor.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later.  The ->dyntick_drain field controls the sequencing.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(int cpu)
{
	struct timer_list *tp;
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	int tne;

	/* Handle nohz enablement switches conservatively. */
	tne = ACCESS_ONCE(tick_nohz_enabled);
	if (tne != rdtp->tick_nohz_enabled_snap) {
		if (rcu_cpu_has_callbacks(cpu))
			invoke_rcu_core(); /* force nohz to see update. */
		rdtp->tick_nohz_enabled_snap = tne;
		return;
	}
	if (!tne)
		return;

	/* Adaptive-tick mode, where usermode execution is idle to RCU. */
	if (!is_idle_task(current)) {
		rdtp->dyntick_holdoff = jiffies - 1;
		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
			trace_rcu_prep_idle("User dyntick with callbacks");
			rdtp->idle_gp_timer_expires =
				round_up(jiffies + RCU_IDLE_GP_DELAY,
					 RCU_IDLE_GP_DELAY);
		} else if (rcu_cpu_has_callbacks(cpu)) {
			rdtp->idle_gp_timer_expires =
				round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
			trace_rcu_prep_idle("User dyntick with lazy callbacks");
		} else {
			return;
		}
		tp = &rdtp->idle_gp_timer;
		mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
		return;
	}

	/*
	 * If this is an idle re-entry, for example, due to use of
	 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
	 * loop, then don't take any state-machine actions, unless the
	 * momentary exit from idle queued additional non-lazy callbacks.
	 * Instead, repost the ->idle_gp_timer if this CPU has callbacks
	 * pending.
	 */
	if (!rdtp->idle_first_pass &&
	    (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
		if (rcu_cpu_has_callbacks(cpu)) {
			tp = &rdtp->idle_gp_timer;
			mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
		}
		return;
	}
	rdtp->idle_first_pass = 0;
	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;

	/*
	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
	 * Also reset state to avoid prejudicing later attempts.
	 */
	if (!rcu_cpu_has_callbacks(cpu)) {
		rdtp->dyntick_holdoff = jiffies - 1;
		rdtp->dyntick_drain = 0;
		trace_rcu_prep_idle("No callbacks");
		return;
	}

	/*
	 * If in holdoff mode, just return.  We will presumably have
	 * refrained from disabling the scheduling-clock tick.
	 */
	if (rdtp->dyntick_holdoff == jiffies) {
		trace_rcu_prep_idle("In holdoff");
		return;
	}

	/* Check and update the ->dyntick_drain sequencing. */
	if (rdtp->dyntick_drain <= 0) {
		/* First time through, initialize the counter. */
		rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
	} else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu) &&
		   !local_softirq_pending()) {
		/* Can we go dyntick-idle despite still having callbacks? */
		rdtp->dyntick_drain = 0;
		rdtp->dyntick_holdoff = jiffies;
		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
			trace_rcu_prep_idle("Dyntick with callbacks");
			rdtp->idle_gp_timer_expires =
				round_up(jiffies + RCU_IDLE_GP_DELAY,
					 RCU_IDLE_GP_DELAY);
		} else {
			rdtp->idle_gp_timer_expires =
				round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
			trace_rcu_prep_idle("Dyntick with lazy callbacks");
		}
		tp = &rdtp->idle_gp_timer;
		mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
		return; /* Nothing more to do immediately. */
	} else if (--(rdtp->dyntick_drain) <= 0) {
		/* We have hit the limit, so time to give up. */
		rdtp->dyntick_holdoff = jiffies;
		trace_rcu_prep_idle("Begin holdoff");
		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
		return;
	}

	/*
	 * Do one step of pushing the remaining RCU callbacks through
	 * the RCU core state machine.
	 */
#ifdef CONFIG_TREE_PREEMPT_RCU
	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
		rcu_preempt_qs(cpu);
		force_quiescent_state(&rcu_preempt_state);
	}
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state);
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state);
	}

	/*
	 * If RCU callbacks are still pending, RCU still needs this CPU.
	 * So try forcing the callbacks through the grace period.
	 */
	if (rcu_cpu_has_callbacks(cpu)) {
		trace_rcu_prep_idle("More callbacks");
		invoke_rcu_core();
	} else {
		trace_rcu_prep_idle("Callbacks drained");
	}
}

/*
 * Keep a running count of the number of non-lazy callbacks posted
 * on this CPU.  This running counter (which is never decremented) allows
 * rcu_prepare_for_idle() to detect when something out of the idle loop
 * posts a callback, even if an equal number of callbacks are invoked.
 * Of course, callbacks should only be posted from within a trace event
 * designed to be called from idle or from within RCU_NONIDLE().
 */
static void rcu_idle_count_callbacks_posted(void)
{
	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
}

/*
 * Data for flushing lazy RCU callbacks at OOM time.
 */
static atomic_t oom_callback_count;
static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);

/*
 * RCU OOM callback -- decrement the outstanding count and deliver the
 * wake-up if we are the last one.
 */
static void rcu_oom_callback(struct rcu_head *rhp)
{
	if (atomic_dec_and_test(&oom_callback_count))
		wake_up(&oom_callback_wq);
}

/*
 * Post an rcu_oom_notify callback on the current CPU if it has at
 * least one lazy callback.  This will unnecessarily post callbacks
 * to CPUs that already have a non-lazy callback at the end of their
 * callback list, but this is an infrequent operation, so accept some
 * extra overhead to keep things simple.
 */
static void rcu_oom_notify_cpu(void *unused)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;

	for_each_rcu_flavor(rsp) {
		rdp = __this_cpu_ptr(rsp->rda);
		if (rdp->qlen_lazy != 0) {
			atomic_inc(&oom_callback_count);
			rsp->call(&rdp->oom_head, rcu_oom_callback);
		}
	}
}

/*
 * If low on memory, ensure that each CPU has a non-lazy callback.
 * This will wake up CPUs that have only lazy callbacks, in turn
 * ensuring that they free up the corresponding memory in a timely manner.
 * Because an uncertain amount of memory will be freed in some uncertain
 * timeframe, we do not claim to have freed anything.
 */
static int rcu_oom_notify(struct notifier_block *self,
			  unsigned long notused, void *nfreed)
{
	int cpu;

	/* Wait for callbacks from earlier instance to complete. */
	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);

	/*
	 * Prevent premature wakeup: ensure that all increments happen
	 * before there is a chance of the counter reaching zero.
	 */
	atomic_set(&oom_callback_count, 1);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
		cond_resched();
	}
	put_online_cpus();

	/* Unconditionally decrement: no need to wake ourselves up. */
	atomic_dec(&oom_callback_count);

	return NOTIFY_OK;
}

static struct notifier_block rcu_oom_nb = {
	.notifier_call = rcu_oom_notify
};

static int __init rcu_register_oom_notifier(void)
{
	register_oom_notifier(&rcu_oom_nb);
	return 0;
}
early_initcall(rcu_register_oom_notifier);

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#ifdef CONFIG_RCU_CPU_STALL_INFO

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	struct timer_list *tltp = &rdtp->idle_gp_timer;
	char c;

	c = rdtp->dyntick_holdoff == jiffies ? 'H' : '.';
	if (timer_pending(tltp))
		sprintf(cp, "drain=%d %c timer=%lu",
			rdtp->dyntick_drain, c, tltp->expires - jiffies);
	else
		sprintf(cp, "drain=%d %c timer not pending",
			rdtp->dyntick_drain, c);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

/* Initiate the stall-info list. */
static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT "\n");
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period
 * (flavor specified by rsp), then print the number of scheduling
 * clock interrupts the CPU has taken during the time that it has
 * been aware.  Otherwise, print the number of RCU grace periods
 * that this CPU is ignorant of, for example, "1" if the CPU was
 * aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_dynticks *rdtp = rdp->dynticks;
	char *ticks_title;
	unsigned long ticks_value;

	if (rsp->gpnum == rdp->gpnum) {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	} else {
		ticks_title = "GPs behind";
		ticks_value = rsp->gpnum - rdp->gpnum;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
	       cpu, ticks_value, ticks_title,
	       atomic_read(&rdtp->dynticks) & 0xfff,
	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
	       fast_no_hz);
}

/* Terminate the stall-info list. */
static void print_cpu_stall_info_end(void)
{
	printk(KERN_ERR "\t");
}

/* Zero ->ticks_this_gp for all flavors of RCU. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
}

/* Increment ->ticks_this_gp for all flavors of RCU. */
static void increment_cpu_stall_ticks(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		__this_cpu_ptr(rsp->rda)->ticks_this_gp++;
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT " {");
}

static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	printk(KERN_CONT " %d", cpu);
}

static void print_cpu_stall_info_end(void)
{
	printk(KERN_CONT "} ");
}

static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
}

static void increment_cpu_stall_ticks(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */