/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."
#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0);
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */
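
/*
 * Because of the MODULE_PARAM_PREFIX defined above, the parameters declared
 * in this file appear under the "rcupdate." prefix, for example as the
 * kernel boot parameters rcupdate.rcu_expedited=1, rcupdate.rcu_normal=1,
 * and rcupdate.rcu_normal_after_boot=1.
 */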
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (i.e., we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering these as in extended quiescent
 * state, so such a CPU is effectively never in an RCU read-side critical
 * section regardless of what RCU primitives it invokes.  This state of
 * affairs is required: we need to keep an RCU-free window in idle where
 * the CPU may possibly enter into low power mode.  This way, other CPUs
 * that have started a grace period can observe the extended quiescent
 * state.  Otherwise we would delay any grace period as long as we run
 * in the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif
#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the period of boot between when the
 * first task is spawned and when the rcu_exp_runtime_mode()
 * core_initcall() is invoked, during which everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
	       rcu_scheduler_active == RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
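
/*
 * Illustrative sketch of the intended pairing of rcu_expedite_gp() and
 * rcu_unexpedite_gp().  The helper below is hypothetical and the block is
 * not compiled; it only shows the calling convention.
 */
#if 0
static void example_latency_sensitive_update(void)
{
	rcu_expedite_gp();	/* Subsequent synchronize_rcu() calls are expedited. */
	synchronize_rcu();	/* Behaves like synchronize_rcu_expedited(). */
	rcu_unexpedite_gp();	/* Undo; normal behavior resumes once all undos are done. */
}
#endif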
/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
}

#endif /* #ifndef CONFIG_TINY_RCU */
#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = READ_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */
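
/*
 * Illustrative reader-side usage of the primitives implemented above.  The
 * structure and pointer below are hypothetical and the block is not
 * compiled; it only shows the usual rcu_read_lock()/rcu_dereference() idiom.
 */
#if 0
struct example_conf {
	int value;
};
static struct example_conf __rcu *example_conf_ptr;

static int example_read_value(void)
{
	struct example_conf *p;
	int val = -1;

	rcu_read_lock();			/* Maps to __rcu_read_lock() on PREEMPT_RCU. */
	p = rcu_dereference(example_conf_ptr);	/* Valid only inside the critical section. */
	if (p)
		val = p->value;
	rcu_read_unlock();			/* May invoke rcu_read_unlock_special(). */
	return val;
}
#endif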
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);
int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
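
/*
 * Illustrative use of the lockdep predicates defined above: a function that
 * must run inside an RCU read-side critical section can check for that with
 * rcu_read_lock_held().  The function below is hypothetical and the block is
 * not compiled.
 */
#if 0
static void example_needs_rcu_reader(void)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "example_needs_rcu_reader() used outside RCU read-side critical section");
	/* ... access RCU-protected data here ... */
}
#endif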
/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;

	/* Initialize and register callbacks for each flavor specified. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh)) {
			might_sleep();
			continue;
		}
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh))
			continue;
		wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
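
/*
 * Illustrative single-flavor equivalent of what __wait_rcu_gp() does above,
 * showing the rcu_synchronize/wakeme_after_rcu() pattern.  The function is
 * hypothetical and the block is not compiled.
 */
#if 0
static void example_synchronize_one_flavor(call_rcu_func_t crf)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	crf(&rcu.head, wakeme_after_rcu);	/* Queue the wakeup callback. */
	wait_for_completion(&rcu.completion);	/* Block until the grace period ends. */
	destroy_rcu_head_on_stack(&rcu.head);
}
#endif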
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA		(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA		0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
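
/*
 * For example, with the usual CONFIG_RCU_CPU_STALL_TIMEOUT default of 21
 * seconds, the function above returns 21 * HZ jiffies (plus the additional
 * 5 * HZ of RCU_STALL_DELAY_DELTA under CONFIG_PROVE_RCU); writing 500 to
 * the rcu_cpu_stall_timeout parameter is clamped back to 300 seconds.
 */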
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context switch,
 * user-space execution, and idle.  As such, grace periods can take one good
 * long time.  There are no read-side primitives similar to rcu_read_lock()
 * and rcu_read_unlock() because this implementation is intended to get
 * the system into a safe state for some of the manipulations involved in
 * tracing and the like.  Finally, this implementation does not support
 * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
 * per-CPU callback lists will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
module_param(rcu_task_stall_timeout, int, 0644);

static void rcu_spawn_tasks_kthread(void);
static struct task_struct *rcu_tasks_kthread_ptr;
/*
 * Post an RCU-tasks callback.  First call must be from process context
 * after the scheduler is fully operational.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	unsigned long flags;
	bool needwake;
	bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if ((needwake && havetask) ||
	    (!havetask && !irqs_disabled_flags(flags))) {
		rcu_spawn_tasks_kthread();
		wake_up(&rcu_tasks_cbs_wq);
	}
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy. ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
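
/*
 * Illustrative use of the RCU-tasks API above: freeing a dynamically
 * allocated trampoline only after no task can still be executing in it.
 * The structure and helpers below are hypothetical and the block is not
 * compiled.
 */
#if 0
struct example_trampoline {
	struct rcu_head rh;
	/* ... executable code ... */
};

static void example_free_trampoline_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example_trampoline, rh));
}

static void example_retire_trampoline(struct example_trampoline *tr)
{
	/*
	 * Freed only after an RCU-tasks grace period, that is, after every
	 * task has passed through a voluntary context switch, usermode
	 * execution, or the idle loop.
	 */
	call_rcu_tasks(&tr->rh, example_free_trampoline_cb);
}
#endif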
/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}
/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current);

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down. ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rcu_tasks_cbs_wq,
						 rcu_tasks_cbs_head);
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}

		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_sched()
		 * suffices because all these transitions occur with
		 * interrupts disabled.  Without this synchronize_sched(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 *
		 * This synchronize_sched() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 */
		synchronize_sched();

		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && READ_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
				WRITE_ONCE(t->rcu_tasks_holdout, true);
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

		/*
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_sched() to finish the job.
		 */
		synchronize_srcu(&tasks_rcu_exit_srcu);

		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		 */
		lastreport = jiffies;
		while (!list_empty(&rcu_tasks_holdouts)) {
			bool firstreport;
			bool needreport;
			int rtst;
			struct task_struct *t1;

			schedule_timeout_interruptible(HZ);
			rtst = READ_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
			if (needreport)
				lastreport = jiffies;
			firstreport = true;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						 rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
				cond_resched();
			}
		}

		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed
		 * to have full memory barriers prior to them in the
		 * schedule() path, memory reordering on other CPUs could
		 * cause their RCU-tasks read-side critical sections to
		 * extend past the end of the grace period.  However,
		 * because these ->nvcsw updates are carried out with
		 * interrupts disabled, we can use synchronize_sched()
		 * to force the needed ordering on all such CPUs.
		 *
		 * This synchronize_sched() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 *
		 * In addition, this synchronize_sched() waits for exiting
		 * tasks to complete their final preempt_disable() region
		 * of execution, cleaning up after the synchronize_srcu()
		 * above.
		 */
		synchronize_sched();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		schedule_timeout_uninterruptible(HZ/10);
	}
}
/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
static void rcu_spawn_tasks_kthread(void)
{
	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
	struct task_struct *t;

	if (READ_ONCE(rcu_tasks_kthread_ptr)) {
		smp_mb(); /* Ensure caller sees full kthread. */
		return;
	}
	mutex_lock(&rcu_tasks_kthread_mutex);
	if (rcu_tasks_kthread_ptr) {
		mutex_unlock(&rcu_tasks_kthread_mutex);
		return;
	}
	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	BUG_ON(IS_ERR(t));
	smp_mb(); /* Ensure others see full kthread. */
	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
	mutex_unlock(&rcu_tasks_kthread_mutex);
}

#endif /* #ifdef CONFIG_TASKS_RCU */
/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_bh();
	synchronize_sched();
	synchronize_rcu_expedited();
	synchronize_rcu_bh_expedited();
	synchronize_sched_expedited();
}
#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters, one for each flavor
 */
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;

module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;

	call_rcu(&head, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
	static struct rcu_head head;

	call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
	static struct rcu_head head;

	call_rcu_sched(&head, test_callback);
}
void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	if (rcu_self_test_bh)
		early_boot_test_call_rcu_bh();
	if (rcu_self_test_sched)
		early_boot_test_call_rcu_sched();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
	}
	if (rcu_self_test_bh) {
		early_boot_test_counter++;
		rcu_barrier_bh();
	}
	if (rcu_self_test_sched) {
		early_boot_test_counter++;
		rcu_barrier_sched();
	}
	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}
	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */