rcu: Make expedited grace periods recheck dyntick idle state
author Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tue, 11 Oct 2016 13:09:59 +0000 (06:09 -0700)
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Mon, 14 Nov 2016 18:46:31 +0000 (10:46 -0800)
Expedited grace periods check dyntick-idle state, and avoid sending
IPIs to idle CPUs, including those running guest OSes, and, on NOHZ_FULL
kernels, nohz_full CPUs.  However, the kernel has been observed checking
a CPU while it was non-idle, but sending the IPI after it had gone
idle.  This commit therefore rechecks the idle state immediately before
sending the IPI, refraining from IPIing CPUs that have since gone idle.

Reported-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcu/tree.h
kernel/rcu/tree_exp.h

index e99a5234d9ed86261cc56a882e42f957079cc87d..fe98dd24adf895216b52c80bd102a561368df7bb 100644 (file)
@@ -404,6 +404,7 @@ struct rcu_data {
        atomic_long_t exp_workdone1;    /* # done by others #1. */
        atomic_long_t exp_workdone2;    /* # done by others #2. */
        atomic_long_t exp_workdone3;    /* # done by others #3. */
+       int exp_dynticks_snap;          /* Double-check need for IPI. */
 
        /* 7) Callback offloading. */
 #ifdef CONFIG_RCU_NOCB_CPU
index 24343eb87b582e4f1dd5d436e6d7ef830e935c19..d3053e99fdb67deb01a35a9af998a66658d0ee22 100644 (file)
@@ -358,8 +358,10 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
                        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
+                       rdp->exp_dynticks_snap =
+                               atomic_add_return(0, &rdtp->dynticks);
                        if (raw_smp_processor_id() == cpu ||
-                           !(atomic_add_return(0, &rdtp->dynticks) & 0x1) ||
+                           !(rdp->exp_dynticks_snap & 0x1) ||
                            !(rnp->qsmaskinitnext & rdp->grpmask))
                                mask_ofl_test |= rdp->grpmask;
                }
@@ -377,9 +379,17 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                /* IPI the remaining CPUs for expedited quiescent state. */
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
+                       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+                       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
                        if (!(mask_ofl_ipi & mask))
                                continue;
 retry_ipi:
+                       if (atomic_add_return(0, &rdtp->dynticks) !=
+                           rdp->exp_dynticks_snap) {
+                               mask_ofl_test |= mask;
+                               continue;
+                       }
                        ret = smp_call_function_single(cpu, func, rsp, 0);
                        if (!ret) {
                                mask_ofl_ipi &= ~mask;
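
For context, the following is a simplified, stand-alone sketch of the
snapshot-and-recheck pattern this patch implements.  It uses C11 atomics
in place of the kernel's atomic_add_return(0, ...) idiom, and the struct
and function names (cpu_state, cpu_needs_ipi, cpu_still_needs_ipi) are
illustrative assumptions, not kernel identifiers.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cpu_state {
	atomic_int dynticks;	/* odd => CPU non-idle, even => idle */
	int exp_dynticks_snap;	/* snapshot taken while selecting CPUs */
};

/* Selection pass: record a snapshot and decide whether an IPI is needed. */
static bool cpu_needs_ipi(struct cpu_state *cs)
{
	cs->exp_dynticks_snap = atomic_fetch_add(&cs->dynticks, 0);
	return cs->exp_dynticks_snap & 0x1;	/* non-idle at snapshot time */
}

/* IPI pass: recheck immediately before sending, as the patch does. */
static bool cpu_still_needs_ipi(struct cpu_state *cs)
{
	/* Any change means the CPU passed through idle since the snapshot. */
	return atomic_fetch_add(&cs->dynticks, 0) == cs->exp_dynticks_snap;
}

int main(void)
{
	struct cpu_state cs = { .dynticks = 1 };	/* odd: non-idle */

	if (cpu_needs_ipi(&cs)) {
		/* Meanwhile the CPU enters idle: the counter becomes even. */
		atomic_fetch_add(&cs.dynticks, 1);

		if (cpu_still_needs_ipi(&cs))
			printf("IPI the CPU for an expedited quiescent state\n");
		else
			printf("CPU has been idle; count it quiescent without an IPI\n");
	}
	return 0;
}

As in the hunk above, a counter that has moved since the snapshot proves
the CPU passed through a quiescent (idle) state, so the expedited
grace-period machinery can record it in mask_ofl_test rather than
interrupting it.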