softirq: Use hotplug thread infrastructure
author    Thomas Gleixner <tglx@linutronix.de>
          Mon, 16 Jul 2012 10:42:37 +0000 (10:42 +0000)
committer Thomas Gleixner <tglx@linutronix.de>
          Mon, 13 Aug 2012 15:01:07 +0000 (17:01 +0200)
[ paulmck: Call rcu_note_context_switch() with interrupts enabled. ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: http://lkml.kernel.org/r/20120716103948.456416747@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
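
The conversion below replaces softirq's open-coded kthread management with the generic per-CPU hotplug thread infrastructure from <linux/smpboot.h>: a client fills in a struct smp_hotplug_thread descriptor and registers it once, and the smpboot core takes over thread creation, CPU binding, naming, wakeup and hotplug handling for every CPU. As a rough sketch of that contract (the names my_thread, my_should_run, my_thread_fn and my_threads are illustrative, not part of this patch):

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, my_thread);

/* Called by the smpboot core loop with preemption disabled;
 * nonzero means "there is work, invoke thread_fn". */
static int my_should_run(unsigned int cpu)
{
	return 0;	/* replace with a real pending-work test */
}

/* Called with preemption enabled; handle one batch of work and
 * return so the core loop can re-check stop/park requests. */
static void my_thread_fn(unsigned int cpu)
{
	/* process this CPU's pending work */
}

static struct smp_hotplug_thread my_threads = {
	.store			= &my_thread,
	.thread_should_run	= my_should_run,
	.thread_fn		= my_thread_fn,
	.thread_comm		= "my_thread/%u",
};

static __init int my_threads_init(void)
{
	return smpboot_register_percpu_thread(&my_threads);
}
early_initcall(my_threads_init);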
kernel/softirq.c

diff --git a/kernel/softirq.c b/kernel/softirq.c
index b73e681df09ea23e951b04672ca41227e9e0787f..5c6a5bd8462fb33fa8737d0ba1b6dbd7e2144dec 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -23,6 +23,7 @@
 #include <linux/rcupdate.h>
 #include <linux/ftrace.h>
 #include <linux/smp.h>
+#include <linux/smpboot.h>
 #include <linux/tick.h>
 
 #define CREATE_TRACE_POINTS
@@ -742,49 +743,22 @@ void __init softirq_init(void)
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
 
-static int run_ksoftirqd(void * __bind_cpu)
+static int ksoftirqd_should_run(unsigned int cpu)
 {
-       set_current_state(TASK_INTERRUPTIBLE);
-
-       while (!kthread_should_stop()) {
-               preempt_disable();
-               if (!local_softirq_pending()) {
-                       schedule_preempt_disabled();
-               }
-
-               __set_current_state(TASK_RUNNING);
-
-               while (local_softirq_pending()) {
-                       /* Preempt disable stops cpu going offline.
-                          If already offline, we'll be on wrong CPU:
-                          don't process */
-                       if (cpu_is_offline((long)__bind_cpu))
-                               goto wait_to_die;
-                       local_irq_disable();
-                       if (local_softirq_pending())
-                               __do_softirq();
-                       local_irq_enable();
-                       sched_preempt_enable_no_resched();
-                       cond_resched();
-                       preempt_disable();
-                       rcu_note_context_switch((long)__bind_cpu);
-               }
-               preempt_enable();
-               set_current_state(TASK_INTERRUPTIBLE);
-       }
-       __set_current_state(TASK_RUNNING);
-       return 0;
+       return local_softirq_pending();
+}
 
-wait_to_die:
-       preempt_enable();
-       /* Wait for kthread_stop */
-       set_current_state(TASK_INTERRUPTIBLE);
-       while (!kthread_should_stop()) {
-               schedule();
-               set_current_state(TASK_INTERRUPTIBLE);
+static void run_ksoftirqd(unsigned int cpu)
+{
+       local_irq_disable();
+       if (local_softirq_pending()) {
+               __do_softirq();
+               rcu_note_context_switch(cpu);
+               local_irq_enable();
+               cond_resched();
+               return;
        }
-       __set_current_state(TASK_RUNNING);
-       return 0;
+       local_irq_enable();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
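
Everything the old run_ksoftirqd() open-coded above (the TASK_INTERRUPTIBLE/schedule() idle wait, the kthread_should_stop() polling, and the wait_to_die exit path) is now provided once by the common loop in kernel/smpboot.c, which is why ksoftirqd_should_run() shrinks to a one-line predicate. Simplified, and with the park/stop/cleanup handling omitted, that core loop behaves roughly like this; note that thread_should_run() is called with preemption disabled, preserving the old code's "preempt disable stops cpu going offline" guarantee:

/* Simplified sketch of smpboot_thread_fn() in kernel/smpboot.c */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (!ht->thread_should_run(td->cpu)) {
			preempt_enable();
			schedule();		/* sleep until woken */
		} else {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);	/* one batch of work */
		}
	}
	return 0;
}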
@@ -850,50 +824,14 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
 {
-       int hotcpu = (unsigned long)hcpu;
-       struct task_struct *p;
-
        switch (action) {
-       case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
-               p = kthread_create_on_node(run_ksoftirqd,
-                                          hcpu,
-                                          cpu_to_node(hotcpu),
-                                          "ksoftirqd/%d", hotcpu);
-               if (IS_ERR(p)) {
-                       printk("ksoftirqd for %i failed\n", hotcpu);
-                       return notifier_from_errno(PTR_ERR(p));
-               }
-               kthread_bind(p, hotcpu);
-               per_cpu(ksoftirqd, hotcpu) = p;
-               break;
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-               wake_up_process(per_cpu(ksoftirqd, hotcpu));
-               break;
 #ifdef CONFIG_HOTPLUG_CPU
-       case CPU_UP_CANCELED:
-       case CPU_UP_CANCELED_FROZEN:
-               if (!per_cpu(ksoftirqd, hotcpu))
-                       break;
-               /* Unbind so it can run.  Fall thru. */
-               kthread_bind(per_cpu(ksoftirqd, hotcpu),
-                            cpumask_any(cpu_online_mask));
        case CPU_DEAD:
-       case CPU_DEAD_FROZEN: {
-               static const struct sched_param param = {
-                       .sched_priority = MAX_RT_PRIO-1
-               };
-
-               p = per_cpu(ksoftirqd, hotcpu);
-               per_cpu(ksoftirqd, hotcpu) = NULL;
-               sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
-               kthread_stop(p);
-               takeover_tasklets(hotcpu);
+       case CPU_DEAD_FROZEN:
+               takeover_tasklets((unsigned long)hcpu);
                break;
-       }
 #endif /* CONFIG_HOTPLUG_CPU */
-       }
+       }
        return NOTIFY_OK;
 }
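
Only the tasklet takeover on CPU_DEAD remains in the notifier: thread creation, binding, wakeup and teardown are now the smpboot core's business, which parks a per-CPU thread when its CPU goes offline and unparks it when the CPU comes back. The wakeup side of kernel/softirq.c needs no change either; raising a softirq outside interrupt context still wakes this CPU's thread through the same per-CPU pointer, roughly as in the era's wakeup_softirqd() (sketch, unchanged by this patch):

static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}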
 
@@ -901,14 +839,19 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
 };
 
+static struct smp_hotplug_thread softirq_threads = {
+       .store                  = &ksoftirqd,
+       .thread_should_run      = ksoftirqd_should_run,
+       .thread_fn              = run_ksoftirqd,
+       .thread_comm            = "ksoftirqd/%u",
+};
+
 static __init int spawn_ksoftirqd(void)
 {
-       void *cpu = (void *)(long)smp_processor_id();
-       int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
-
-       BUG_ON(err != NOTIFY_OK);
-       cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
+
+       BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
+
        return 0;
 }
 early_initcall(spawn_ksoftirqd);
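
Note the role of .store in the descriptor above: the smpboot core writes each created thread's task_struct pointer into that per-CPU variable, so existing users of per_cpu(ksoftirqd, cpu), such as the wakeup path sketched earlier, keep working unmodified. The BUG_ON() around smpboot_register_percpu_thread() keeps the old policy; spawn_ksoftirqd() already did BUG_ON(err != NOTIFY_OK), since a boot-time failure to create ksoftirqd is unrecoverable.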