/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
30 /* Global control variables for rcupdate callback mechanism. */
32 struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */
33 struct rcu_head **donetail; /* ->next pointer of last "done" CB. */
34 struct rcu_head **curtail; /* ->next pointer of last CB. */
35 RCU_TRACE(long qlen); /* Number of pending CBs. */
36 RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
37 RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
38 RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
39 RCU_TRACE(const char *name); /* Name of RCU type. */
42 /* Definition for rcupdate control block. */
43 static struct rcu_ctrlblk rcu_sched_ctrlblk = {
44 .donetail = &rcu_sched_ctrlblk.rcucblist,
45 .curtail = &rcu_sched_ctrlblk.rcucblist,
46 RCU_TRACE(.name = "rcu_sched")
49 static struct rcu_ctrlblk rcu_bh_ctrlblk = {
50 .donetail = &rcu_bh_ctrlblk.rcucblist,
51 .curtail = &rcu_bh_ctrlblk.rcucblist,
52 RCU_TRACE(.name = "rcu_bh")
55 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
56 #include <linux/kernel_stat.h>
58 int rcu_scheduler_active __read_mostly;
59 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
62 * During boot, we forgive RCU lockdep issues. After this function is
63 * invoked, we start taking RCU lockdep issues seriously. Note that unlike
64 * Tree RCU, Tiny RCU transitions directly from RCU_SCHEDULER_INACTIVE
65 * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage.
66 * The reason for this is that Tiny RCU does not need kthreads, so does
67 * not have to care about the fact that the scheduler is half-initialized
68 * at a certain phase of the boot process. Unless SRCU is in the mix.
70 void __init rcu_scheduler_starting(void)
72 WARN_ON(nr_context_switches() > 0);
73 rcu_scheduler_active = IS_ENABLED(CONFIG_SRCU)
74 ? RCU_SCHEDULER_INIT : RCU_SCHEDULER_RUNNING;
77 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
79 #ifdef CONFIG_RCU_TRACE
81 static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
85 local_irq_save(flags);
87 local_irq_restore(flags);
91 * Dump statistics for TINY_RCU, such as they are.
93 static int show_tiny_stats(struct seq_file *m, void *unused)
95 seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
96 seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
100 static int show_tiny_stats_open(struct inode *inode, struct file *file)
102 return single_open(file, show_tiny_stats, NULL);
105 static const struct file_operations show_tiny_stats_fops = {
106 .owner = THIS_MODULE,
107 .open = show_tiny_stats_open,
110 .release = single_release,
113 static struct dentry *rcudir;
115 static int __init rcutiny_trace_init(void)
117 struct dentry *retval;
119 rcudir = debugfs_create_dir("rcu", NULL);
122 retval = debugfs_create_file("rcudata", 0444, rcudir,
123 NULL, &show_tiny_stats_fops);
128 debugfs_remove_recursive(rcudir);
131 device_initcall(rcutiny_trace_init);
133 static void check_cpu_stall(struct rcu_ctrlblk *rcp)
138 if (rcu_cpu_stall_suppress)
140 rcp->ticks_this_gp++;
142 js = READ_ONCE(rcp->jiffies_stall);
143 if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
144 pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
145 rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
146 jiffies - rcp->gp_start, rcp->qlen);
148 WRITE_ONCE(rcp->jiffies_stall,
149 jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
150 } else if (ULONG_CMP_GE(j, js)) {
151 WRITE_ONCE(rcp->jiffies_stall,
152 jiffies + rcu_jiffies_till_stall_check());
156 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
158 rcp->ticks_this_gp = 0;
159 rcp->gp_start = jiffies;
160 WRITE_ONCE(rcp->jiffies_stall,
161 jiffies + rcu_jiffies_till_stall_check());
164 static void check_cpu_stalls(void)
166 RCU_TRACE(check_cpu_stall(&rcu_bh_ctrlblk);)
167 RCU_TRACE(check_cpu_stall(&rcu_sched_ctrlblk);)
170 #endif /* #ifdef CONFIG_RCU_TRACE */