/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/moduleparam.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

/* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
static struct task_struct *rcu_kthread_task;
static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
static unsigned long have_rcu_kthread_work;

/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_kthread(void);
static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static int rcu_kthread(void *arg);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

#ifdef CONFIG_NO_HZ

static long rcu_dynticks_nesting = 1;

/*
 * Enter dynticks-idle mode, which is an extended quiescent state
 * if we have fully entered that mode (i.e., if the new value of
 * dynticks_nesting is zero).
 */
void rcu_enter_nohz(void)
{
	if (--rcu_dynticks_nesting == 0)
		rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
}

/*
 * Exit dynticks-idle mode, so that we are no longer in an extended
 * quiescent state.
 */
void rcu_exit_nohz(void)
{
	rcu_dynticks_nesting++;
}

#endif /* #ifdef CONFIG_NO_HZ */

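/*
 * Worked example (editorial, not part of the original file): the
 * nesting counter starts at 1, so the CPU boots outside of
 * dyntick-idle mode, and only the transition of the counter to zero
 * marks an extended quiescent state:
 *
 *	rcu_enter_nohz();	nesting: 1 -> 0, extended QS begins
 *	rcu_exit_nohz();	nesting: 0 -> 1, extended QS ends
 *
 * A nested rcu_exit_nohz()/rcu_enter_nohz() pair while the counter is
 * zero briefly leaves and then re-enters the extended quiescent state.
 */
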
/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Called with irqs disabled to avoid confusion due to interrupt
 * handlers invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}

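/*
 * Editorial sketch of the list layout assumed above (fields are from
 * rcutiny_plugin.h's struct rcu_ctrlblk): callbacks sit on one singly
 * linked list, ->donetail references the ->next pointer of the last
 * callback whose grace period has elapsed, and ->curtail references
 * the ->next pointer of the last callback queued:
 *
 *	->rcucblist --> cb1 --> cb2 --> cb3 --> NULL
 *	                 ^               ^
 *	                 |               |
 *	             ->donetail      ->curtail
 *
 * Setting ->donetail = ->curtail thus marks cb2 and cb3 done as well:
 * on a uniprocessor, a quiescent state ends the grace period for every
 * callback already queued.
 */
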
/*
 * Wake up rcu_kthread() to process callbacks now eligible for invocation
 * or to boost readers.
 */
static void invoke_rcu_kthread(void)
{
	have_rcu_kthread_work = 1;
	wake_up(&rcu_kthread_wq);
}

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting,
 * so that both helpers always run.
 */
void rcu_sched_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_kthread();
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_kthread();
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (user ||
	    (idle_cpu(cpu) &&
	     !in_softirq() &&
	     /* No hardirq nesting beyond this interrupt itself. */
	     hardirq_count() <= (1 << HARDIRQ_SHIFT)))
		rcu_sched_qs(cpu);
	else if (!in_softirq())
		rcu_bh_qs(cpu);
	rcu_preempt_check_callbacks();
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(0, -1));
		RCU_TRACE(trace_rcu_batch_end(0));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(0, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcu_preempt_remove_callbacks(rcp);
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		list->func(list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(cb_count));
}

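/*
 * Editorial trace of the splice above (illustrative): with cb1 done
 * and cb2 still waiting for a grace period,
 *
 *	before:	->rcucblist --> cb1 --> cb2 --> NULL
 *		->donetail == &cb1->next, ->curtail == &cb2->next
 *
 *	after:	list == cb1, cb1->next == NULL	(invoked locally)
 *		->rcucblist --> cb2 --> NULL
 *		->donetail == &->rcucblist	(nothing done yet)
 *
 * so not-yet-ready callbacks stay queued on the control block while
 * the done ones are invoked with irqs enabled.
 */
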
/*
 * This kthread invokes RCU callbacks whose grace periods have
 * elapsed.  It is awakened as needed, and takes the place of the
 * RCU_SOFTIRQ that was used previously for this purpose.
 * This is a kthread, but it is never stopped, at least not until
 * the system goes down.
 */
static int rcu_kthread(void *arg)
{
	unsigned long work;
	unsigned long morework;
	unsigned long flags;

	for (;;) {
		wait_event_interruptible(rcu_kthread_wq,
					 have_rcu_kthread_work != 0);
		morework = rcu_boost();
		/* Sample/reset the work flag with irqs off so no wakeup is lost. */
		local_irq_save(flags);
		work = have_rcu_kthread_work;
		have_rcu_kthread_work = morework;
		local_irq_restore(flags);
		if (work) {
			rcu_process_callbacks(&rcu_sched_ctrlblk);
			rcu_process_callbacks(&rcu_bh_ctrlblk);
			rcu_preempt_process_callbacks();
		}
		schedule_timeout_interruptible(1); /* Leave CPU for others. */
	}

	return 0; /* Not reached, but needed to shut gcc up. */
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);

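/*
 * Editorial example (not part of the original file; struct foo, gp,
 * and the field names are hypothetical): the classic update-side
 * pattern that the near-no-op grace period above makes cheap on UP:
 *
 *	struct foo *oldp = gp;
 *	struct foo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
 *
 *	*newp = *oldp;			copy,
 *	newp->a = 1;			update the copy,
 *	rcu_assign_pointer(gp, newp);	publish it,
 *	synchronize_sched();		wait out pre-existing readers,
 *	kfree(oldp);			then reclaim the old version.
 */
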
/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

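/*
 * Editorial example (not part of the original file; struct foo and
 * foo_reclaim() are hypothetical): an asynchronous caller embeds a
 * struct rcu_head in its own structure and recovers it with
 * container_of() in the callback:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		struct foo *fp = container_of(rcu, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	call_rcu_sched(&fp->rcu, foo_reclaim);
 */
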
/*
 * Spawn the kthread that invokes RCU callbacks.
 */
static int __init rcu_spawn_kthreads(void)
{
	struct sched_param sp;

	rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
	return 0;
}
early_initcall(rcu_spawn_kthreads);