/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by:	Nicolas Pitre, March 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/moduleparam.h>

#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>

/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
	unsigned int id;
	asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
	return id & MPIDR_HWID_BITMASK;
}

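/*
 * Note: throughout this file, MPIDR affinity level 0 is the CPU number
 * within a cluster and affinity level 1 is the cluster number, the usual
 * layout on ARMv7 big.LITTLE systems.
 */
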
/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_unused)
{
	unsigned ib_mpidr, ib_cpu, ib_cluster;

	pr_debug("%s\n", __func__);

	ib_mpidr = cpu_logical_map(smp_processor_id());
	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

	/*
	 * Our state has been saved at this point.  Let's release our
	 * inbound CPU.
	 */
	mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
	sev();

	/*
	 * From this point, we must assume that our counterpart CPU might
	 * have taken over in its parallel world already, as if execution
	 * just returned from cpu_suspend().  It is therefore important to
	 * be very careful not to make any change the other guy is not
	 * expecting.  This is why we need stack isolation.
	 *
	 * Fancy under cover tasks could be performed here.  For now
	 * we have none.
	 */

	/* Let's put ourself down. */
	mcpm_cpu_power_down();

	/* should never get here */
	BUG();
}

/*
 * Stack isolation.  To ensure 'current' remains valid, we just use another
 * piece of our thread's stack space which should be fairly lightly used.
 * The selected area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line, and indexed
 * with the cluster number.
 */
#define STACK_SIZE 512
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);

static int bL_switchpoint(unsigned long _arg)
{
	unsigned int mpidr = read_mpidr();
	unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	void *stack = current_thread_info() + 1;
	stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
	stack += clusterid * STACK_SIZE + STACK_SIZE;
	call_with_stack(bL_do_switch, (void *)_arg, stack);
	BUG();
}

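/*
 * Resulting layout at the bottom of the switcher thread's stack.  The sp
 * handed to call_with_stack() is the *end* of the selected area because
 * the stack grows downwards:
 *
 *	[thread_info][pad to cache line][cluster 0 area][cluster 1 area]
 *	                  sp for cluster 0 ------------^               ^
 *	                  sp for cluster 1 ----------------------------'
 */
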
/*
 * Generic switcher interface
 */

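/*
 * bL_gic_id[cpu][cluster] records the GIC interface ID of each physical
 * CPU so SGIs can be redirected to the counterpart during a switch, and
 * bL_switcher_cpu_pairing[] maps each logical CPU to the logical CPU it
 * is paired with on the other cluster (-1 when unpaired).
 */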
static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
static int bL_switcher_cpu_pairing[NR_CPUS];

/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 *
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
	unsigned int mpidr, this_cpu, that_cpu;
	unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
	struct tick_device *tdev;
	enum clock_event_mode tdev_mode;
	int ret;

	this_cpu = smp_processor_id();
	ob_mpidr = read_mpidr();
	ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
	ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
	BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);

	if (new_cluster_id == ob_cluster)
		return 0;

	that_cpu = bL_switcher_cpu_pairing[this_cpu];
	ib_mpidr = cpu_logical_map(that_cpu);
	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

	pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
		 this_cpu, ob_mpidr, ib_mpidr);

	/* Close the gate for our entry vectors */
	mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
	mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);

	/*
	 * Let's wake up the inbound CPU now in case it requires some delay
	 * to come online, but leave it gated in our entry vector code.
	 */
	ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
	if (ret) {
		pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
		return ret;
	}

	/*
	 * From this point we are entering the switch critical zone
	 * and can't take any interrupts anymore.
	 */
	local_irq_disable();
	local_fiq_disable();

	/* redirect GIC's SGIs to our counterpart */
	gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);

	/*
	 * Raise an SGI on the inbound CPU to make sure it doesn't stall
	 * in a possible WFI, such as in mcpm_power_down().
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(this_cpu));

	/* Shut the tick device down, but only if it is ours alone. */
	tdev = tick_get_device(this_cpu);
	if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
		tdev = NULL;
	if (tdev) {
		tdev_mode = tdev->evtdev->mode;
		clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
	}

	ret = cpu_pm_enter();

	/* we cannot tolerate errors at this point */
	if (ret)
		panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

	/* Swap the physical CPUs in the logical map for this logical CPU. */
	cpu_logical_map(this_cpu) = ib_mpidr;
	cpu_logical_map(that_cpu) = ob_mpidr;

	/* Let's do the actual CPU switch. */
	ret = cpu_suspend(0, bL_switchpoint);
	if (ret > 0)
		panic("%s: cpu_suspend() returned %d\n", __func__, ret);

	/* We are executing on the inbound CPU at this point */
	mpidr = read_mpidr();
	pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
	BUG_ON(mpidr != ib_mpidr);

	mcpm_cpu_powered_up();

	ret = cpu_pm_exit();

	if (tdev) {
		clockevents_set_mode(tdev->evtdev, tdev_mode);
		clockevents_program_event(tdev->evtdev,
					  tdev->evtdev->next_event, 1);
	}

	local_fiq_enable();
	local_irq_enable();

	if (ret)
		pr_err("%s exiting with error %d\n", __func__, ret);
	return ret;
}

struct bL_thread {
	struct task_struct *task;
	wait_queue_head_t wq;
	int wanted_cluster;
	struct completion started;
};

static struct bL_thread bL_threads[NR_CPUS];

static int bL_switcher_thread(void *arg)
{
	struct bL_thread *t = arg;
	struct sched_param param = { .sched_priority = 1 };
	int cluster;

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
	complete(&t->started);

	do {
		if (signal_pending(current))
			flush_signals(current);
		wait_event_interruptible(t->wq,
				t->wanted_cluster != -1 ||
				kthread_should_stop());
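		/*
		 * Atomically consume the request and reset it to -1, so a
		 * concurrent bL_switch_request() is either seen here or on
		 * the next loop iteration, never lost.
		 */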
		cluster = xchg(&t->wanted_cluster, -1);
		if (cluster != -1)
			bL_switch_to(cluster);
	} while (!kthread_should_stop());

	return 0;
}

static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
{
	struct task_struct *task;

	task = kthread_create_on_node(bL_switcher_thread, arg,
				      cpu_to_node(cpu), "kswitcher_%d", cpu);
	if (!IS_ERR(task)) {
		kthread_bind(task, cpu);
		wake_up_process(task);
	} else
		pr_err("%s failed for CPU %d\n", __func__, cpu);
	return task;
}

/*
 * bL_switch_request - Switch to a specific cluster for the given CPU
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread.  This function may or may not return
 * before the switch has occurred.
 */
int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
{
	struct bL_thread *t;

	if (cpu >= ARRAY_SIZE(bL_threads)) {
		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
		return -EINVAL;
	}

	t = &bL_threads[cpu];
	if (IS_ERR(t->task))
		return PTR_ERR(t->task);
	if (!t->task)
		return -ESRCH;

	t->wanted_cluster = new_cluster_id;
	wake_up(&t->wq);
	return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request);

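/*
 * Illustrative (hypothetical) caller: a cpufreq-style driver would map a
 * frequency decision to a cluster ID and request the switch, e.g.:
 *
 *	unsigned int cluster = freq > LITTLE_MAX_FREQ ? BIG_CLUSTER
 *						      : LITTLE_CLUSTER;
 *	bL_switch_request(cpu, cluster);
 *
 * where BIG_CLUSTER, LITTLE_CLUSTER and LITTLE_MAX_FREQ are the caller's
 * own definitions, not provided by this driver.
 */
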
/*
 * Activation and configuration code.
 */

static DEFINE_MUTEX(bL_switcher_activation_lock);
static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
static unsigned int bL_switcher_active;
static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
static cpumask_t bL_switcher_removed_logical_cpus;

int bL_switcher_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);

int bL_switcher_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);

static int bL_activation_notify(unsigned long val)
{
	int ret;

	ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
	if (ret & NOTIFY_STOP_MASK)
		pr_err("%s: notifier chain failed with status 0x%x\n",
			__func__, ret);
	return notifier_to_errno(ret);
}

static void bL_switcher_restore_cpus(void)
{
	int i;

	for_each_cpu(i, &bL_switcher_removed_logical_cpus)
		cpu_up(i);
}

static int bL_switcher_halve_cpus(void)
{
	int i, j, cluster_0, gic_id, ret;
	unsigned int cpu, cluster, mask;
	cpumask_t available_cpus;

	/* First pass to validate what we have */
	mask = 0;
	for_each_online_cpu(i) {
		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
		if (cluster >= 2) {
			pr_err("%s: only dual cluster systems are supported\n", __func__);
			return -EINVAL;
		}
		if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
			return -EINVAL;
		mask |= (1 << cluster);
	}
	if (mask != 3) {
		pr_err("%s: no CPU pairing possible\n", __func__);
		return -EINVAL;
	}

	/*
	 * Now let's do the pairing.  We match each CPU with another CPU
	 * from a different cluster.  To get a uniform scheduling behavior
	 * without fiddling with CPU topology and compute capacity data,
	 * we'll use logical CPUs initially belonging to the same cluster.
	 */
	memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
	cpumask_copy(&available_cpus, cpu_online_mask);
	cluster_0 = -1;

	for_each_cpu(i, &available_cpus) {
		int match = -1;

		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
		if (cluster_0 == -1)
			cluster_0 = cluster;
		if (cluster != cluster_0)
			continue;
		cpumask_clear_cpu(i, &available_cpus);
		for_each_cpu(j, &available_cpus) {
			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
			/*
			 * Let's remember the last match to create "odd"
			 * pairings on purpose in order for other code not
			 * to assume any relation between physical and
			 * logical CPU numbers.
			 */
			if (cluster != cluster_0)
				match = j;
		}
		if (match != -1) {
			bL_switcher_cpu_pairing[i] = match;
			cpumask_clear_cpu(match, &available_cpus);
			pr_info("CPU%d paired with CPU%d\n", i, match);
		}
	}

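	/*
	 * Example: with logical CPUs 0-1 on cluster 0 and 2-3 on cluster 1,
	 * the "last match" rule above pairs CPU0 with CPU3 and then CPU1
	 * with CPU2, deliberately avoiding a straight 0<->2, 1<->3 mapping.
	 */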
	/*
	 * Now we disable the unwanted CPUs i.e. everything that has no
	 * pairing information (that includes the pairing counterparts).
	 */
	cpumask_clear(&bL_switcher_removed_logical_cpus);
	for_each_online_cpu(i) {
		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);

		/* Let's take note of the GIC ID for this CPU */
		gic_id = gic_get_cpu_id(i);
		if (gic_id < 0) {
			pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
			bL_switcher_restore_cpus();
			return -EINVAL;
		}
		bL_gic_id[cpu][cluster] = gic_id;
		pr_info("GIC ID for CPU %u cluster %u is %u\n",
			cpu, cluster, gic_id);

		if (bL_switcher_cpu_pairing[i] != -1) {
			bL_switcher_cpu_original_cluster[i] = cluster;
			continue;
		}

		ret = cpu_down(i);
		if (ret) {
			bL_switcher_restore_cpus();
			return ret;
		}
		cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
	}

	return 0;
}

static int bL_switcher_enable(void)
{
	int cpu, ret;

	mutex_lock(&bL_switcher_activation_lock);
	cpu_hotplug_driver_lock();
	if (bL_switcher_active) {
		cpu_hotplug_driver_unlock();
		mutex_unlock(&bL_switcher_activation_lock);
		return 0;
	}

	pr_info("big.LITTLE switcher initializing\n");

	ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
	if (ret)
		goto error;

	ret = bL_switcher_halve_cpus();
	if (ret)
		goto error;

	for_each_online_cpu(cpu) {
		struct bL_thread *t = &bL_threads[cpu];
		init_waitqueue_head(&t->wq);
		init_completion(&t->started);
		t->wanted_cluster = -1;
		t->task = bL_switcher_thread_create(cpu, t);
	}

	bL_switcher_active = 1;
	bL_activation_notify(BL_NOTIFY_POST_ENABLE);
	pr_info("big.LITTLE switcher initialized\n");
	goto out;

error:
	pr_warn("big.LITTLE switcher initialization failed\n");
	bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
	cpu_hotplug_driver_unlock();
	mutex_unlock(&bL_switcher_activation_lock);
	return ret;
}

#ifdef CONFIG_SYSFS

static void bL_switcher_disable(void)
{
	unsigned int cpu, cluster;
	struct bL_thread *t;
	struct task_struct *task;

	mutex_lock(&bL_switcher_activation_lock);
	cpu_hotplug_driver_lock();

	if (!bL_switcher_active)
		goto out;

	if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
		bL_activation_notify(BL_NOTIFY_POST_ENABLE);
		goto out;
	}

	bL_switcher_active = 0;

	/*
	 * To deactivate the switcher, we must shut down the switcher
	 * threads to prevent any other requests from being accepted.
	 * Then, if the final cluster for a given logical CPU is not the
	 * same as the original one, we'll recreate a switcher thread
	 * just for the purpose of switching the CPU back without any
	 * possibility for interference from external requests.
	 */
	for_each_online_cpu(cpu) {
		t = &bL_threads[cpu];
		task = t->task;
		t->task = NULL;
		if (!task || IS_ERR(task))
			continue;
		kthread_stop(task);
		/* no more switch may happen on this CPU at this point */
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
		if (cluster == bL_switcher_cpu_original_cluster[cpu])
			continue;
		init_completion(&t->started);
		t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
		task = bL_switcher_thread_create(cpu, t);
		if (!IS_ERR(task)) {
			wait_for_completion(&t->started);
			kthread_stop(task);
			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
			if (cluster == bL_switcher_cpu_original_cluster[cpu])
				continue;
		}

		/* If execution gets here, we're in trouble. */
		pr_crit("%s: unable to restore original cluster for CPU %d\n",
			__func__, cpu);
		pr_crit("%s: CPU %d can't be restored\n",
			__func__, bL_switcher_cpu_pairing[cpu]);
		cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
				  &bL_switcher_removed_logical_cpus);
	}

	bL_switcher_restore_cpus();
	bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
	cpu_hotplug_driver_unlock();
	mutex_unlock(&bL_switcher_activation_lock);
}

static ssize_t bL_switcher_active_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", bL_switcher_active);
}

static ssize_t bL_switcher_active_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret;

	switch (buf[0]) {
	case '0':
		bL_switcher_disable();
		ret = 0;
		break;
	case '1':
		ret = bL_switcher_enable();
		break;
	default:
		ret = -EINVAL;
	}

	return (ret >= 0) ? count : ret;
}

static struct kobj_attribute bL_switcher_active_attr =
	__ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);

static struct attribute *bL_switcher_attrs[] = {
	&bL_switcher_active_attr.attr,
	NULL,
};

static struct attribute_group bL_switcher_attr_group = {
	.attrs = bL_switcher_attrs,
};

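/*
 * The group is attached below under /sys/kernel/bL_switcher/, so user
 * space can toggle the switcher at run time, e.g.:
 *
 *	echo 0 > /sys/kernel/bL_switcher/active		(disable)
 *	echo 1 > /sys/kernel/bL_switcher/active		(enable)
 */
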
static struct kobject *bL_switcher_kobj;

static int __init bL_switcher_sysfs_init(void)
{
	int ret;

	bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
	if (!bL_switcher_kobj)
		return -ENOMEM;
	ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
	if (ret)
		kobject_put(bL_switcher_kobj);
	return ret;
}

#endif	/* CONFIG_SYSFS */

bool bL_switcher_get_enabled(void)
{
	mutex_lock(&bL_switcher_activation_lock);

	return bL_switcher_active;
}
EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);

void bL_switcher_put_enabled(void)
{
	mutex_unlock(&bL_switcher_activation_lock);
}
EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);

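/*
 * These two calls bracket a critical section during which the switcher
 * state is guaranteed not to change, e.g.:
 *
 *	if (bL_switcher_get_enabled()) {
 *		... rely on the switcher pairing / logical map here ...
 *	}
 *	bL_switcher_put_enabled();
 *
 * Note that bL_switcher_put_enabled() must be called even when the
 * switcher was found disabled, since the lock is taken unconditionally.
 */
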
/*
 * Veto any CPU hotplug operation on those CPUs we've removed
 * while the switcher is active.
 * We're just not ready to deal with that given the trickery involved.
 */
static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	if (bL_switcher_active) {
		int pairing = bL_switcher_cpu_pairing[(unsigned long)hcpu];
		switch (action & 0xf) {
		case CPU_UP_PREPARE:
		case CPU_DOWN_PREPARE:
			if (pairing == -1)
				return NOTIFY_BAD;
		}
	}
	return NOTIFY_DONE;
}

static bool no_bL_switcher;
core_param(no_bL_switcher, no_bL_switcher, bool, 0644);

static int __init bL_switcher_init(void)
{
	int ret;

	if (MAX_NR_CLUSTERS != 2) {
		pr_err("%s: only dual cluster systems are supported\n", __func__);
		return -EINVAL;
	}

	cpu_notifier(bL_switcher_hotplug_callback, 0);

	if (!no_bL_switcher) {
		ret = bL_switcher_enable();
		if (ret)
			return ret;
	}

#ifdef CONFIG_SYSFS
	ret = bL_switcher_sysfs_init();
	if (ret)
		pr_err("%s: unable to create sysfs entry\n", __func__);
#endif

	return 0;
}

late_initcall(bL_switcher_init);