 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>
#include "rcu_segcblist.h"
ulong exp_holdoff = 25 * 1000; /* Holdoff (ns) for auto-expediting. */
module_param(exp_holdoff, ulong, 0444);
static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
 * Initialize SRCU combining tree. Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them. So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;
	/* Work out the overall tree geometry. */
	sp->level[0] = &sp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);
	/* Each pass through this loop initializes one srcu_node structure. */
	rcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_init(&snp->lock);
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		snp->srcu_gp_seq_needed_exp = 0;
		if (snp == &sp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
		if (snp == sp->level[level + 1])
			level++;
		snp->srcu_parent = sp->level[level - 1] +
				   (snp - sp->level[level]) /
				   levelspread[level - 1];
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = sp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_init(&sdp->lock);
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures. The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
	mutex_init(&sp->srcu_cb_mutex);
	mutex_init(&sp->srcu_gp_mutex);
	sp->srcu_barrier_seq = 0;
	mutex_init(&sp->srcu_barrier_mutex);
	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	if (!is_static)
		sp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(sp, is_static);
	sp->srcu_gp_seq_needed_exp = 0;
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
	return sp->sda ? 0 : -ENOMEM;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	spin_lock_init(&sp->gp_lock);
	return init_srcu_struct_fields(sp, false);
EXPORT_SYMBOL_GPL(__init_srcu_struct);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function. Each srcu_struct represents a separate domain
 * of SRCU protection.
int init_srcu_struct(struct srcu_struct *sp)
	spin_lock_init(&sp->gp_lock);
	return init_srcu_struct_fields(sp, false);
EXPORT_SYMBOL_GPL(init_srcu_struct);
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 * First-use initialization of statically allocated srcu_struct
 * structure. Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive. Use ->gp_lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
static void check_init_srcu_struct(struct srcu_struct *sp)
	WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave(&sp->gp_lock, flags);
	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore(&sp->gp_lock, flags);
	init_srcu_struct_fields(sp, true);
	spin_unlock_irqrestore(&sp->gp_lock, flags);
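/*
 * Usage sketch (illustrative only; "example_srcu" and example_update() are
 * hypothetical): a statically allocated srcu_struct relies on this
 * first-use initialization, so no init_srcu_struct() call is needed; the
 * first update-side primitive (here synchronize_srcu()) wires it up.
 *
 *	DEFINE_STATIC_SRCU(example_srcu);
 *
 *	static void example_update(void)
 *	{
 *		synchronize_srcu(&example_srcu);
 *	}
 */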
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
	unsigned long sum = 0;
	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
	unsigned long sum = 0;
	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
 * Return true if the number of pre-existing readers is determined to
 * be zero.
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
	unsigned long unlocks;
	unlocks = srcu_readers_unlock_idx(sp, idx);
	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted. Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side. In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */
	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between. This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * Possible bug: There is no guarantee that there haven't been
	 * ULONG_MAX increments of ->srcu_lock_count[] since the unlocks were
	 * counted, meaning that this could return true even if there are
	 * still active readers. Since there are no memory barriers around
	 * srcu_flip(), the CPU is not required to increment ->srcu_idx
	 * before running srcu_readers_unlock_idx(), which means that there
	 * could be an arbitrarily large number of critical sections that
	 * execute after srcu_readers_unlock_idx() but use the old value
	 * of ->srcu_idx.
	 */
	return srcu_readers_lock_idx(sp, idx) == unlocks;
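/*
 * Illustrative store-buffering litmus sketch of the A/B pairing described
 * above (hypothetical variables X and Y, both initially zero; not code in
 * this file):
 *
 *	CPU 0 (reader)			CPU 1 (updater)
 *	WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	smp_mb();  (B)			smp_mb();  (A)
 *	r0 = READ_ONCE(Y);		r1 = READ_ONCE(X);
 *
 * With both barriers present, the outcome r0 == 0 && r1 == 0 is forbidden.
 * This is the flavor of guarantee the comment above relies on when it says
 * that a counted unlock implies the corresponding lock is counted as well.
 */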
 * srcu_readers_active - returns true if there are readers, and false otherwise
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct. That said, it
 * can be useful as an error check at cleanup time.
static bool srcu_readers_active(struct srcu_struct *sp)
	unsigned long sum = 0;
	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
#define SRCU_INTERVAL	1
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
static unsigned long srcu_get_delay(struct srcu_struct *sp)
	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
void cleanup_srcu_struct(struct srcu_struct *sp)
	if (WARN_ON(!srcu_get_delay(sp)))
		return; /* Leakage unless caller handles error. */
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	flush_delayed_work(&sp->work);
	for_each_possible_cpu(cpu)
		flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(sp))) {
		pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	free_percpu(sp->sda);
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
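/*
 * Usage sketch (illustrative only; "my_ctx", "my_srcu", my_ctx_setup() and
 * my_ctx_teardown() are hypothetical): typical lifetime of a dynamically
 * initialized srcu_struct.
 *
 *	struct my_ctx {
 *		struct srcu_struct my_srcu;
 *	};
 *
 *	static int my_ctx_setup(struct my_ctx *ctx)
 *	{
 *		return init_srcu_struct(&ctx->my_srcu);
 *	}
 *
 *	static void my_ctx_teardown(struct my_ctx *ctx)
 *	{
 *		cleanup_srcu_struct(&ctx->my_srcu);
 *	}
 *
 * All readers, updaters, and call_srcu() callbacks must be finished before
 * my_ctx_teardown() runs, otherwise the checks above will complain.
 */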
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct. Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
int __srcu_read_lock(struct srcu_struct *sp)
	idx = READ_ONCE(sp->srcu_idx) & 0x1;
	__this_cpu_inc(sp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
EXPORT_SYMBOL_GPL(__srcu_read_lock);
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct. Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
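/*
 * Reader-side usage sketch (illustrative only; "example_srcu",
 * "example_ptr", struct example and do_sleepable_work() are hypothetical).
 * Readers normally use srcu_read_lock()/srcu_read_unlock(), which wrap the
 * two functions above, and unlike plain RCU they may block in between:
 *
 *	int idx;
 *	struct example *p;
 *
 *	idx = srcu_read_lock(&example_srcu);
 *	p = srcu_dereference(example_ptr, &example_srcu);
 *	if (p)
 *		do_sleepable_work(p);
 *	srcu_read_unlock(&example_srcu, idx);
 */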
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited(). We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections. If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
#define SRCU_RETRY_CHECK_DELAY	5
 * Start an SRCU grace period.
static void srcu_gp_start(struct srcu_struct *sp)
	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
	RCU_LOCKDEP_WARN(!lockdep_is_held(&sp->gp_lock),
			 "Invoked srcu_gp_start() without ->gp_lock!");
	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&sp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
 * Track online CPUs to guide callback workqueue placement.
DEFINE_PER_CPU(bool, srcu_online);
void srcu_online_cpu(unsigned int cpu)
	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
void srcu_offline_cpu(unsigned int cpu)
	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
 * Place the workqueue handler on the specified CPU if online, otherwise
 * just run it wherever. This is useful for placing workqueue handlers
 * that are to invoke the specified CPU's callbacks.
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
	if (READ_ONCE(per_cpu(srcu_online, cpu)))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
	srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq,
				   &sdp->work, delay);
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx. If possible,
 * schedule this invocation on the corresponding CPUs.
static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
 * Note the end of an SRCU grace period. Initiates callback invocation
 * and starts a new grace period if needed.
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation. This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
static void srcu_gp_end(struct srcu_struct *sp)
	unsigned long cbdelay;
	struct srcu_node *snp;
	/* Prevent more than one additional grace period. */
	mutex_lock(&sp->srcu_cb_mutex);
	/* End the current grace period. */
	spin_lock_irq(&sp->gp_lock);
	idx = rcu_seq_state(sp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(sp);
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	rcu_seq_end(&sp->srcu_gp_seq);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
		sp->srcu_gp_seq_needed_exp = gpseq;
	spin_unlock_irq(&sp->gp_lock);
	mutex_unlock(&sp->srcu_gp_mutex);
	/* A new grace period can start at this point. But only one. */
	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
	rcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_irq(&snp->lock);
		if (snp >= sp->level[rcu_num_lvls - 1])
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq(&snp->lock);
		smp_mb(); /* GP end before CB invocation. */
		srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&sp->srcu_cb_mutex);
	/* Start a new grace period if needed. */
	spin_lock_irq(&sp->gp_lock);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
		spin_unlock_irq(&sp->gp_lock);
		/* Throttle expedited grace periods: Should be rare! */
		srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
				    ? 0 : SRCU_INTERVAL);
		spin_unlock_irq(&sp->gp_lock);
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests. This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting. To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long s)
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
		spin_lock_irqsave(&snp->lock, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore(&snp->lock, flags);
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore(&snp->lock, flags);
	spin_lock_irqsave(&sp->gp_lock, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;
	spin_unlock_irqrestore(&sp->gp_lock, flags);
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests. The winner has to do the work of actually starting grace
 * period s. Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;
	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave(&snp->lock, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore(&snp->lock, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				smp_mb(); /* CBs after GP! */
				srcu_schedule_cbs_sdp(sdp, do_norm
						      ? SRCU_INTERVAL
						      : 0);
			srcu_funnel_exp_start(sp, snp, s);
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		spin_unlock_irqrestore(&snp->lock, flags);
	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave(&sp->gp_lock, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
		 * Record need for grace period s. Pair with load
		 * acquire setting up for initialization.
		smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;
	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
		queue_delayed_work(system_power_efficient_wq, &sp->work,
				   srcu_get_delay(sp));
	spin_unlock_irqrestore(&sp->gp_lock, flags);
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount + !srcu_get_delay(sp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows
 * us to wait for pre-existing readers in a starvation-free manner.
static void srcu_flip(struct srcu_struct *sp)
	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update. Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	smp_mb(); /* D */  /* Pairs with C. */
 * If SRCU is likely idle, return true, otherwise return false.
 * Note that it is OK for several current from-idle requests for a new
 * grace period to specify expediting, because they will all end
 * up requesting the same grace period anyhow. So no loss.
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle". This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems. Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period. The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
static bool srcu_might_be_idle(struct srcu_struct *sp)
	unsigned long curseq;
	struct srcu_data *sdp;
	/* If the local srcu_data structure has callbacks, not idle. */
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	local_irq_restore(flags);
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, sp->srcu_last_gp_end,
			       sp->srcu_last_gp_end + exp_holdoff))
		return false; /* Too soon after last GP. */
	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&sp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections. On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu(). It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
		 rcu_callback_t func, bool do_norm)
	bool needexp = false;
	struct srcu_data *sdp;
	check_init_srcu_struct(sp);
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	spin_lock(&sdp->lock);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	s = rcu_seq_snap(&sp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
	spin_unlock_irqrestore(&sdp->lock, flags);
	srcu_funnel_gp_start(sp, sdp, s, do_norm);
	srcu_funnel_exp_start(sp, sdp->mynode, s);
void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
	       rcu_callback_t func)
	__call_srcu(sp, rhp, func, true);
EXPORT_SYMBOL_GPL(call_srcu);
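/*
 * Usage sketch for call_srcu() (illustrative only; "example_srcu",
 * "example_ptr", "example_lock", struct example and example_free_cb() are
 * hypothetical): publish a new version and free the old one once all
 * pre-existing SRCU readers are done with it.
 *
 *	struct example {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void example_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct example, rh));
 *	}
 *
 *	static void example_replace(struct example *newp)
 *	{
 *		struct example *oldp;
 *
 *		spin_lock(&example_lock);
 *		oldp = rcu_dereference_protected(example_ptr,
 *						 lockdep_is_held(&example_lock));
 *		rcu_assign_pointer(example_ptr, newp);
 *		spin_unlock(&example_lock);
 *		call_srcu(&example_srcu, &oldp->rh, example_free_cb);
 *	}
 */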
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
	struct rcu_synchronize rcu;
	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	check_init_srcu_struct(sp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
void synchronize_srcu_expedited(struct srcu_struct *sp)
	__synchronize_srcu(sp, rcu_gp_is_normal());
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 * Wait for the count to drain to zero of both indexes. To avoid the
 * possible starvation of synchronize_srcu(), it waits for the count of
 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
 * and then flips ->srcu_idx and waits for the count of the other index.
 * Can block; must be called from process context.
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu(). In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section. Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu(). This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 * If SRCU is likely idle, expedite the first request. This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it. Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
void synchronize_srcu(struct srcu_struct *sp)
	if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(sp);
	else
		__synchronize_srcu(sp, true);
EXPORT_SYMBOL_GPL(synchronize_srcu);
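/*
 * Updater-side usage sketch (illustrative only; "example_srcu",
 * "example_ptr" and "example_lock" are hypothetical): unpublish an item,
 * wait for all pre-existing SRCU readers, then free it synchronously.
 *
 *	struct example *oldp;
 *
 *	spin_lock(&example_lock);
 *	oldp = rcu_dereference_protected(example_ptr,
 *					 lockdep_is_held(&example_lock));
 *	rcu_assign_pointer(example_ptr, NULL);
 *	spin_unlock(&example_lock);
 *	synchronize_srcu(&example_srcu);
 *	kfree(oldp);
 */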
 * Callback function for srcu_barrier() use.
static void srcu_barrier_cb(struct rcu_head *rhp)
	struct srcu_data *sdp;
	struct srcu_struct *sp;
	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	sp = sdp->sp;
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
void srcu_barrier(struct srcu_struct *sp)
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);
	check_init_srcu_struct(sp);
	mutex_lock(&sp->srcu_barrier_mutex);
	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&sp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	rcu_seq_start(&sp->srcu_barrier_seq);
	init_completion(&sp->srcu_barrier_completion);
	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued. Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_irq(&sdp->lock);
		atomic_inc(&sp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head, 0))
			atomic_dec(&sp->srcu_barrier_cpu_cnt);
		spin_unlock_irq(&sdp->lock);
	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
	wait_for_completion(&sp->srcu_barrier_completion);
	rcu_seq_end(&sp->srcu_barrier_seq);
	mutex_unlock(&sp->srcu_barrier_mutex);
EXPORT_SYMBOL_GPL(srcu_barrier);
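/*
 * Usage sketch (illustrative only; "example_srcu" and example_exit() are
 * hypothetical): when users of an srcu_struct have posted callbacks with
 * call_srcu(), stop posting new ones, wait for those already posted with
 * srcu_barrier(), and only then clean up.
 *
 *	static void example_exit(void)
 *	{
 *		srcu_barrier(&example_srcu);
 *		cleanup_srcu_struct(&example_srcu);
 *	}
 */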
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
unsigned long srcu_batches_completed(struct srcu_struct *sp)
EXPORT_SYMBOL_GPL(srcu_batches_completed);
 * Core SRCU state machine. Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
static void srcu_advance_state(struct srcu_struct *sp)
	mutex_lock(&sp->srcu_gp_mutex);
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1. We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq(&sp->gp_lock);
		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
			spin_unlock_irq(&sp->gp_lock);
			mutex_unlock(&sp->srcu_gp_mutex);
		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(sp);
		spin_unlock_irq(&sp->gp_lock);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 1)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		srcu_flip(sp);
		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 2)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period. If there are more to do, SRCU will reschedule
 * the workqueue. Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
static void srcu_invoke_callbacks(struct work_struct *work)
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *sp;
	sdp = container_of(work, struct srcu_data, work.work);
	sp = sdp->sp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq(&sdp->lock);
	smp_mb(); /* Old grace periods before callback invocation! */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq(&sdp->lock);
		return; /* Someone else on the job or nothing to do. */
	/* We are on the job! Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	spin_unlock_irq(&sdp->lock);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	spin_lock_irq(&sdp->lock);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq(&sdp->lock);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
 * Finished one round of SRCU grace period. Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
	spin_lock_irq(&sp->gp_lock);
	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {
		/* Outstanding request and no GP. Start one. */
	spin_unlock_irq(&sp->gp_lock);
		queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
 * This is the work-queue function that handles SRCU grace periods.
void process_srcu(struct work_struct *work)
	struct srcu_struct *sp;
	sp = container_of(work, struct srcu_struct, work.work);
	srcu_advance_state(sp);
	srcu_reschedule(sp, srcu_get_delay(sp));
EXPORT_SYMBOL_GPL(process_srcu);
void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gpnum, unsigned long *completed)
	if (test_type != SRCU_FLAVOR)
		return;
	*completed = rcu_seq_ctr(sp->srcu_gp_seq);
	*gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);