powerpc: Bring all threads online prior to migration/hibernation
author     Robert Jennings <rcj@linux.vnet.ibm.com>
           Tue, 7 May 2013 04:34:11 +0000 (04:34 +0000)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 19 May 2013 17:54:40 +0000 (10:54 -0700)
commit 120496ac2d2d60aee68d3123a68169502a85f4b5 upstream.

This patch brings online all threads that are present but not online
prior to migration/hibernation.  After migration/hibernation, those
threads are taken back offline.

During migration/hibernation, all online CPUs must call H_JOIN; this is
required by the hypervisor.  Without this patch, threads that are offline
(H_CEDE'd) will not be woken to make the H_JOIN call, and the OS will
deadlock (all threads either JOIN'd or CEDE'd).

Signed-off-by: Robert Jennings <rcj@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/powerpc/include/asm/rtas.h
arch/powerpc/kernel/rtas.c
arch/powerpc/platforms/pseries/suspend.c

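For context, both call sites touched by this patch (rtas_ibm_suspend_me() in
rtas.c and store_hibernate() in suspend.c) follow the same pattern: compute the
set of present-but-offline CPUs, bring them online with rtas_online_cpus_mask()
so every thread can make the H_JOIN call, perform the suspend, then return the
same set to the offline state with rtas_offline_cpus_mask().  The sketch below
is illustrative only and is not part of the patch; do_suspend() is a
hypothetical placeholder for rtas_ibm_suspend_me()/pm_suspend(), and error
handling is reduced to the minimum.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <asm/rtas.h>

/* Sketch of the call pattern introduced by this patch. */
static int suspend_with_all_threads_online(void)
{
	cpumask_var_t offline_mask;
	int rc, cpuret;

	if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
		return -ENOMEM;

	/* CPUs that are present but not currently online */
	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);

	/* Wake them so every thread can participate in H_JOIN */
	cpuret = rtas_online_cpus_mask(offline_mask);
	if (cpuret) {
		rc = cpuret;
		goto out;
	}

	rc = do_suspend();	/* hypothetical placeholder for the suspend call */

	/* Return the threads we woke to their previous offline state */
	cpuret = rtas_offline_cpus_mask(offline_mask);
	if (cpuret)
		pr_warn("could not restore CPUs to offline state\n");

out:
	free_cpumask_var(offline_mask);
	return rc;
}

On failure of rtas_online_cpus_mask(), the mask is left describing only the
CPUs whose state actually changed, so the caller (or the helper itself, as the
rtas.c hunk shows) can offline exactly those CPUs again.
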
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 557cff845deed0b6281bf983ccb8684352ea9874..5e7e00889d6f1734fb518eb51567597e71d0d96e 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -262,6 +262,8 @@ extern void rtas_progress(char *s, unsigned short hex);
 extern void rtas_initialize(void);
 extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
 extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
+extern int rtas_online_cpus_mask(cpumask_var_t cpus);
+extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
 extern int rtas_ibm_suspend_me(struct rtas_args *);
 
 struct rtc_time;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index fcec38241f794f204e4f44a052c4b16ce25b125c..225e9f2251268eb5cdadda0737eee014fcbdbcca 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/capability.h>
 #include <linux/delay.h>
+#include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/completion.h>
 #include <linux/cpumask.h>
@@ -808,6 +809,95 @@ static void rtas_percpu_suspend_me(void *info)
        __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
 }
 
+enum rtas_cpu_state {
+       DOWN,
+       UP,
+};
+
+#ifndef CONFIG_SMP
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+                               cpumask_var_t cpus)
+{
+       if (!cpumask_empty(cpus)) {
+               cpumask_clear(cpus);
+               return -EINVAL;
+       } else
+               return 0;
+}
+#else
+/* On return cpumask will be altered to indicate CPUs changed.
+ * CPUs with states changed will be set in the mask,
+ * CPUs with status unchanged will be unset in the mask. */
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+                               cpumask_var_t cpus)
+{
+       int cpu;
+       int cpuret = 0;
+       int ret = 0;
+
+       if (cpumask_empty(cpus))
+               return 0;
+
+       for_each_cpu(cpu, cpus) {
+               switch (state) {
+               case DOWN:
+                       cpuret = cpu_down(cpu);
+                       break;
+               case UP:
+                       cpuret = cpu_up(cpu);
+                       break;
+               }
+               if (cpuret) {
+                       pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
+                                       __func__,
+                                       ((state == UP) ? "up" : "down"),
+                                       cpu, cpuret);
+                       if (!ret)
+                               ret = cpuret;
+                       if (state == UP) {
+                               /* clear bits for unchanged cpus, return */
+                               cpumask_shift_right(cpus, cpus, cpu);
+                               cpumask_shift_left(cpus, cpus, cpu);
+                               break;
+                       } else {
+                               /* clear bit for unchanged cpu, continue */
+                               cpumask_clear_cpu(cpu, cpus);
+                       }
+               }
+       }
+
+       return ret;
+}
+#endif
+
+int rtas_online_cpus_mask(cpumask_var_t cpus)
+{
+       int ret;
+
+       ret = rtas_cpu_state_change_mask(UP, cpus);
+
+       if (ret) {
+               cpumask_var_t tmp_mask;
+
+               if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
+                       return ret;
+
+               /* Use tmp_mask to preserve cpus mask from first failure */
+               cpumask_copy(tmp_mask, cpus);
+               rtas_offline_cpus_mask(tmp_mask);
+               free_cpumask_var(tmp_mask);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(rtas_online_cpus_mask);
+
+int rtas_offline_cpus_mask(cpumask_var_t cpus)
+{
+       return rtas_cpu_state_change_mask(DOWN, cpus);
+}
+EXPORT_SYMBOL(rtas_offline_cpus_mask);
+
 int rtas_ibm_suspend_me(struct rtas_args *args)
 {
        long state;
@@ -815,6 +905,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        struct rtas_suspend_me_data data;
        DECLARE_COMPLETION_ONSTACK(done);
+       cpumask_var_t offline_mask;
+       int cpuret;
 
        if (!rtas_service_present("ibm,suspend-me"))
                return -ENOSYS;
@@ -838,11 +930,24 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
                return 0;
        }
 
+       if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+               return -ENOMEM;
+
        atomic_set(&data.working, 0);
        atomic_set(&data.done, 0);
        atomic_set(&data.error, 0);
        data.token = rtas_token("ibm,suspend-me");
        data.complete = &done;
+
+       /* All present CPUs must be online */
+       cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
+       cpuret = rtas_online_cpus_mask(offline_mask);
+       if (cpuret) {
+               pr_err("%s: Could not bring present CPUs online.\n", __func__);
+               atomic_set(&data.error, cpuret);
+               goto out;
+       }
+
        stop_topology_update();
 
        /* Call function on all CPUs.  One of us will make the
@@ -858,6 +963,14 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
 
        start_topology_update();
 
+       /* Take down CPUs not online prior to suspend */
+       cpuret = rtas_offline_cpus_mask(offline_mask);
+       if (cpuret)
+               pr_warn("%s: Could not restore CPUs to offline state.\n",
+                               __func__);
+
+out:
+       free_cpumask_var(offline_mask);
        return atomic_read(&data.error);
 }
 #else /* CONFIG_PPC_PSERIES */
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index 47226e04126d10e930a7a4a1bf42aa1c9abedb7a..5f997e79d570f8fb704cf9ec79ef49cb538b3b28 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -16,6 +16,7 @@
   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
   */
 
+#include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/suspend.h>
 #include <linux/stat.h>
@@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
 {
+       cpumask_var_t offline_mask;
        int rc;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+               return -ENOMEM;
+
        stream_id = simple_strtoul(buf, NULL, 16);
 
        do {
@@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct device *dev,
        } while (rc == -EAGAIN);
 
        if (!rc) {
+               /* All present CPUs must be online */
+               cpumask_andnot(offline_mask, cpu_present_mask,
+                               cpu_online_mask);
+               rc = rtas_online_cpus_mask(offline_mask);
+               if (rc) {
+                       pr_err("%s: Could not bring present CPUs online.\n",
+                                       __func__);
+                       goto out;
+               }
+
                stop_topology_update();
                rc = pm_suspend(PM_SUSPEND_MEM);
                start_topology_update();
+
+               /* Take down CPUs not online prior to suspend */
+               if (!rtas_offline_cpus_mask(offline_mask))
+                       pr_warn("%s: Could not restore CPUs to offline "
+                                       "state.\n", __func__);
        }
 
        stream_id = 0;
 
        if (!rc)
                rc = count;
+out:
+       free_cpumask_var(offline_mask);
        return rc;
 }