rcu: Use smp_hotplug_thread facility for RCU's per-CPU kthread
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Mon, 16 Jul 2012 10:42:38 +0000 (10:42 +0000)
committer  Thomas Gleixner <tglx@linutronix.de>
           Mon, 13 Aug 2012 15:01:08 +0000 (17:01 +0200)
Bring RCU into the new-age CPU-hotplug fold by modifying RCU's per-CPU
kthread code to use the new smp_hotplug_thread facility.

[ tglx: Adapted it to use callbacks and to the simplified rcu yield ]

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: http://lkml.kernel.org/r/20120716103948.673354828@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h
kernel/rcutree_trace.c
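
The heart of the change is in the kernel/rcutree_plugin.h hunk below: instead of hand-rolling kthread creation, CPU binding, priority switching and teardown from CPU-hotplug notifiers, RCU now fills in a struct smp_hotplug_thread descriptor and lets the smpboot core own the thread lifecycle (spawning one "rcuc/N" kthread per CPU, parking it when the CPU goes offline, unparking it when the CPU returns). For orientation only, here is a minimal sketch of that client pattern against the same era's smpboot API; every demo_* name is invented for illustration and is not part of the patch:

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, demo_thread_task);
static DEFINE_PER_CPU(int, demo_has_work);

/* Called with preemption disabled: "is there work pending on this CPU?" */
static int demo_thread_should_run(unsigned int cpu)
{
	return __get_cpu_var(demo_has_work);
}

/* Invoked by the smpboot core, with preemption enabled, whenever
 * demo_thread_should_run() returned nonzero. */
static void demo_thread_fn(unsigned int cpu)
{
	__get_cpu_var(demo_has_work) = 0;
	/* ... process this CPU's pending work here ... */
}

/* One-shot setup on the freshly created thread, mirroring
 * rcu_cpu_kthread_setup() below: switch the thread to SCHED_FIFO. */
static void demo_thread_setup(unsigned int cpu)
{
	struct sched_param sp = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
}

static struct smp_hotplug_thread demo_thread_spec = {
	.store			= &demo_thread_task,
	.thread_should_run	= demo_thread_should_run,
	.thread_fn		= demo_thread_fn,
	.thread_comm		= "demo/%u",
	.setup			= demo_thread_setup,
};

static int __init demo_init(void)
{
	/* One "demo/N" kthread per possible CPU; the smpboot core parks and
	 * unparks them across CPU hotplug, so no notifier code is needed. */
	return smpboot_register_percpu_thread(&demo_thread_spec);
}
early_initcall(demo_init);

Once the descriptor is registered (the patch does this once in rcu_spawn_kthreads() via smpboot_register_percpu_thread()), the CPU_ONLINE/CPU_DOWN_PREPARE priority juggling in rcu_cpu_notify() and the rcu_spawn_one_cpu_kthread()/rcu_stop_cpu_kthread() helpers become dead code, which is exactly what the hunks below remove.
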

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f08ee3bc5741b999aeb5b28037226969412233cf..11a4fdca1df747e4a21303b33b12df6785ea1ee6 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -133,7 +133,6 @@ static int rcu_scheduler_fully_active __read_mostly;
  */
 static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
-DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 
@@ -1468,7 +1467,6 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 
        /* Adjust any no-longer-needed kthreads. */
-       rcu_stop_cpu_kthread(cpu);
        rcu_boost_kthread_setaffinity(rnp, -1);
 
        /* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */
@@ -2595,11 +2593,9 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
                rcu_boost_kthread_setaffinity(rnp, -1);
-               rcu_cpu_kthread_setrt(cpu, 1);
                break;
        case CPU_DOWN_PREPARE:
                rcu_boost_kthread_setaffinity(rnp, cpu);
-               rcu_cpu_kthread_setrt(cpu, 0);
                break;
        case CPU_DYING:
        case CPU_DYING_FROZEN:
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index f08176172546f21d7c0c3025c9a0f0e24b73e537..1224d4c053823fe7ecdbc2bab9590779c5cc9b7c 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -196,12 +196,6 @@ struct rcu_node {
                                /* Refused to boost: not sure why, though. */
                                /*  This can happen due to race conditions. */
 #endif /* #ifdef CONFIG_RCU_BOOST */
-       struct task_struct *node_kthread_task;
-                               /* kthread that takes care of this rcu_node */
-                               /*  structure, for example, awakening the */
-                               /*  per-CPU kthreads as needed. */
-       unsigned int node_kthread_status;
-                               /* State of node_kthread_task for tracing. */
 } ____cacheline_internodealigned_in_smp;
 
 /*
@@ -468,7 +462,6 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
                                      unsigned long flags);
-static void rcu_stop_cpu_kthread(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static int rcu_print_task_stall(struct rcu_node *rnp);
@@ -494,7 +487,6 @@ static void rcu_preempt_do_callbacks(void);
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                                                 struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
 static void __cpuinit rcu_prepare_kthreads(int cpu);
 static void rcu_prepare_for_idle_init(int cpu);
 static void rcu_cleanup_after_idle(int cpu);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 0f8b5ec64a7d774fe8798ad4907f54282a0332c5..c1961aed12138a39e04ee352d1a070dd238dceb1 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -25,6 +25,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/smpboot.h>
 
 #define RCU_KTHREAD_PRIO 1
 
@@ -1292,25 +1293,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
        return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Stop the RCU's per-CPU kthread when its CPU goes offline,.
- */
-static void rcu_stop_cpu_kthread(int cpu)
-{
-       struct task_struct *t;
-
-       /* Stop the CPU's kthread. */
-       t = per_cpu(rcu_cpu_kthread_task, cpu);
-       if (t != NULL) {
-               per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
-               kthread_stop(t);
-       }
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 static void rcu_kthread_do_work(void)
 {
        rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
@@ -1318,59 +1300,22 @@ static void rcu_kthread_do_work(void)
        rcu_preempt_do_callbacks();
 }
 
-/*
- * Set the specified CPU's kthread to run RT or not, as specified by
- * the to_rt argument.  The CPU-hotplug locks are held, so the task
- * is not going away.
- */
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+static void rcu_cpu_kthread_setup(unsigned int cpu)
 {
-       int policy;
        struct sched_param sp;
-       struct task_struct *t;
 
-       t = per_cpu(rcu_cpu_kthread_task, cpu);
-       if (t == NULL)
-               return;
-       if (to_rt) {
-               policy = SCHED_FIFO;
-               sp.sched_priority = RCU_KTHREAD_PRIO;
-       } else {
-               policy = SCHED_NORMAL;
-               sp.sched_priority = 0;
-       }
-       sched_setscheduler_nocheck(t, policy, &sp);
+       sp.sched_priority = RCU_KTHREAD_PRIO;
+       sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 }
 
-/*
- * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
- * This can happen while the corresponding CPU is either coming online
- * or going offline.  We cannot wait until the CPU is fully online
- * before starting the kthread, because the various notifier functions
- * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
- * the corresponding CPU is online.
- *
- * Return 1 if the kthread needs to stop, 0 otherwise.
- *
- * Caller must disable bh.  This function can momentarily enable it.
- */
-static int rcu_cpu_kthread_should_stop(int cpu)
+static void rcu_cpu_kthread_park(unsigned int cpu)
 {
-       while (cpu_is_offline(cpu) ||
-              !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
-              smp_processor_id() != cpu) {
-               if (kthread_should_stop())
-                       return 1;
-               per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-               per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
-               local_bh_enable();
-               schedule_timeout_uninterruptible(1);
-               if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
-                       set_cpus_allowed_ptr(current, cpumask_of(cpu));
-               local_bh_disable();
-       }
-       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-       return 0;
+       per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+}
+
+static int rcu_cpu_kthread_should_run(unsigned int cpu)
+{
+       return __get_cpu_var(rcu_cpu_has_work);
 }
 
 /*
@@ -1378,96 +1323,35 @@ static int rcu_cpu_kthread_should_stop(int cpu)
  * RCU softirq used in flavors and configurations of RCU that do not
  * support RCU priority boosting.
  */
-static int rcu_cpu_kthread(void *arg)
+static void rcu_cpu_kthread(unsigned int cpu)
 {
-       int cpu = (int)(long)arg;
-       unsigned long flags;
-       int spincnt = 0;
-       unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-       char work;
-       char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+       unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
+       char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
+       int spincnt;
 
-       trace_rcu_utilization("Start CPU kthread@init");
-       for (;;) {
-               *statusp = RCU_KTHREAD_WAITING;
-               trace_rcu_utilization("End CPU kthread@rcu_wait");
-               rcu_wait(*workp != 0 || kthread_should_stop());
+       for (spincnt = 0; spincnt < 10; spincnt++) {
                trace_rcu_utilization("Start CPU kthread@rcu_wait");
                local_bh_disable();
-               if (rcu_cpu_kthread_should_stop(cpu)) {
-                       local_bh_enable();
-                       break;
-               }
                *statusp = RCU_KTHREAD_RUNNING;
-               per_cpu(rcu_cpu_kthread_loops, cpu)++;
-               local_irq_save(flags);
+               this_cpu_inc(rcu_cpu_kthread_loops);
+               local_irq_disable();
                work = *workp;
                *workp = 0;
-               local_irq_restore(flags);
+               local_irq_enable();
                if (work)
                        rcu_kthread_do_work();
                local_bh_enable();
-               if (*workp != 0)
-                       spincnt++;
-               else
-                       spincnt = 0;
-               if (spincnt > 10) {
-                       *statusp = RCU_KTHREAD_YIELDING;
-                       trace_rcu_utilization("End CPU kthread@rcu_yield");
-                       schedule_timeout_interruptible(2);
-                       trace_rcu_utilization("Start CPU kthread@rcu_yield");
-                       spincnt = 0;
+               if (*workp == 0) {
+                       trace_rcu_utilization("End CPU kthread@rcu_wait");
+                       *statusp = RCU_KTHREAD_WAITING;
+                       return;
                }
        }
-       *statusp = RCU_KTHREAD_STOPPED;
-       trace_rcu_utilization("End CPU kthread@term");
-       return 0;
-}
-
-/*
- * Spawn a per-CPU kthread, setting up affinity and priority.
- * Because the CPU hotplug lock is held, no other CPU will be attempting
- * to manipulate rcu_cpu_kthread_task.  There might be another CPU
- * attempting to access it during boot, but the locking in kthread_bind()
- * will enforce sufficient ordering.
- *
- * Please note that we cannot simply refuse to wake up the per-CPU
- * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
- * which can result in softlockup complaints if the task ends up being
- * idle for more than a couple of minutes.
- *
- * However, please note also that we cannot bind the per-CPU kthread to its
- * CPU until that CPU is fully online.  We also cannot wait until the
- * CPU is fully online before we create its per-CPU kthread, as this would
- * deadlock the system when CPU notifiers tried waiting for grace
- * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
- * is online.  If its CPU is not yet fully online, then the code in
- * rcu_cpu_kthread() will wait until it is fully online, and then do
- * the binding.
- */
-static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
-{
-       struct sched_param sp;
-       struct task_struct *t;
-
-       if (!rcu_scheduler_fully_active ||
-           per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
-               return 0;
-       t = kthread_create_on_node(rcu_cpu_kthread,
-                                  (void *)(long)cpu,
-                                  cpu_to_node(cpu),
-                                  "rcuc/%d", cpu);
-       if (IS_ERR(t))
-               return PTR_ERR(t);
-       if (cpu_online(cpu))
-               kthread_bind(t, cpu);
-       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-       WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
-       sp.sched_priority = RCU_KTHREAD_PRIO;
-       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-       per_cpu(rcu_cpu_kthread_task, cpu) = t;
-       wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
-       return 0;
+       *statusp = RCU_KTHREAD_YIELDING;
+       trace_rcu_utilization("Start CPU kthread@rcu_yield");
+       schedule_timeout_interruptible(2);
+       trace_rcu_utilization("End CPU kthread@rcu_yield");
+       *statusp = RCU_KTHREAD_WAITING;
 }
 
 /*
@@ -1503,6 +1387,15 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
        free_cpumask_var(cm);
 }
 
+static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+       .store                  = &rcu_cpu_kthread_task,
+       .thread_should_run      = rcu_cpu_kthread_should_run,
+       .thread_fn              = rcu_cpu_kthread,
+       .thread_comm            = "rcuc/%u",
+       .setup                  = rcu_cpu_kthread_setup,
+       .park                   = rcu_cpu_kthread_park,
+};
+
 /*
  * Spawn all kthreads -- called as soon as the scheduler is running.
  */
@@ -1512,11 +1405,9 @@ static int __init rcu_spawn_kthreads(void)
        int cpu;
 
        rcu_scheduler_fully_active = 1;
-       for_each_possible_cpu(cpu) {
+       for_each_possible_cpu(cpu)
                per_cpu(rcu_cpu_has_work, cpu) = 0;
-               if (cpu_online(cpu))
-                       (void)rcu_spawn_one_cpu_kthread(cpu);
-       }
+       BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
        rnp = rcu_get_root(rcu_state);
        (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
        if (NUM_RCU_NODES > 1) {
@@ -1533,10 +1424,8 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
        struct rcu_node *rnp = rdp->mynode;
 
        /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-       if (rcu_scheduler_fully_active) {
-               (void)rcu_spawn_one_cpu_kthread(cpu);
+       if (rcu_scheduler_fully_active)
                (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
-       }
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */
@@ -1560,22 +1449,10 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void rcu_stop_cpu_kthread(int cpu)
-{
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
 }
 
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
-{
-}
-
 static int __init rcu_scheduler_really_started(void)
 {
        rcu_scheduler_fully_active = 1;
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index abffb486e94ed7ea581b54861f253ca8567bddda..31968931f14647c9d60250bf63ae3271875f10c5 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -108,11 +108,10 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
                        rdp->nxttail[RCU_WAIT_TAIL]],
                   ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
 #ifdef CONFIG_RCU_BOOST
-       seq_printf(m, " kt=%d/%c/%d ktl=%x",
+       seq_printf(m, " kt=%d/%c ktl=%x",
                   per_cpu(rcu_cpu_has_work, rdp->cpu),
                   convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
                                          rdp->cpu)),
-                  per_cpu(rcu_cpu_kthread_cpu, rdp->cpu),
                   per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff);
 #endif /* #ifdef CONFIG_RCU_BOOST */
        seq_printf(m, " b=%ld", rdp->blimit);