/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
int cpus_stuck_in_kernel;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	IPI_WAKEUP
};
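
/*
 * Note: the enum values above index both the ipi_types[] name table and
 * the per-cpu IPI statistics further down, and are the numbers handed
 * to handle_IPI() by the interrupt controller driver.
 */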
#ifdef CONFIG_ARM64_VHE

/* Whether the boot CPU is running in HYP mode or not */
static bool boot_cpu_hyp_mode;

static inline void save_boot_cpu_run_el(void)
{
	boot_cpu_hyp_mode = is_kernel_in_hyp_mode();
}

static inline bool is_boot_cpu_in_hyp_mode(void)
{
	return boot_cpu_hyp_mode;
}
/*
 * Verify that a secondary CPU is running the kernel at the same
 * EL as that of the boot CPU.
 */
void verify_cpu_run_el(void)
{
	bool in_el2 = is_kernel_in_hyp_mode();
	bool boot_cpu_el2 = is_boot_cpu_in_hyp_mode();

	if (in_el2 ^ boot_cpu_el2) {
		pr_crit("CPU%d: mismatched Exception Level(EL%d) with boot CPU(EL%d)\n",
			smp_processor_id(),
			in_el2 ? 2 : 1,
			boot_cpu_el2 ? 2 : 1);
		cpu_panic_kernel();
	}
}

#else
static inline void save_boot_cpu_run_el(void) {}
#endif
#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
	return -ENOSYS;
}
#endif
/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);

	return -EOPNOTSUPP;
}
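
/*
 * The cpu_boot method is supplied by the enable method parsed from the
 * DT or ACPI tables -- in practice PSCI (a firmware CPU_ON call), the
 * spin-table protocol, or the ACPI parking protocol.
 */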
static DECLARE_COMPLETION(cpu_running);
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	long status;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	update_cpu_boot_status(CPU_MMU_OFF);
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	status = READ_ONCE(secondary_data.status);
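
	/*
	 * If the CPU failed to come up, work out how far it got: a status
	 * still reading CPU_MMU_OFF means the secondary never turned its
	 * MMU on, so the authoritative value lives in
	 * __early_cpu_boot_status, which head.S updates through
	 * physical-address accesses before enabling the MMU.
	 */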
	if (ret && status) {
		if (status == CPU_MMU_OFF)
			status = READ_ONCE(__early_cpu_boot_status);

		switch (status) {
		default:
			pr_err("CPU%u: failed in unknown state : 0x%lx\n",
			       cpu, status);
			break;
		case CPU_KILL_ME:
			if (!op_cpu_kill(cpu)) {
				pr_crit("CPU%u: died during early boot\n", cpu);
				break;
			}
			/* Fall through */
			pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
		case CPU_STUCK_IN_KERNEL:
			pr_crit("CPU%u: is stuck in kernel\n", cpu);
			cpus_stuck_in_kernel++;
			break;
		case CPU_PANIC_KERNEL:
			panic("CPU%u detected unsupported configuration\n", cpu);
		}
	}

	return ret;
}
/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	check_local_cpu_capabilities();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	store_cpu_topology(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue. Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor [%08x]\n",
		cpu, read_cpuid_id());
	update_cpu_boot_status(CPU_BOOT_SUCCESS);
	set_cpu_online(cpu, true);
	complete(&cpu_running);
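
	/*
	 * The ordering above matters: the boot status and the online bit
	 * must be published before complete() releases __cpu_up(), which
	 * checks cpu_online() as soon as the completion fires.
	 */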
	local_irq_enable();
	local_async_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have cpu_ops, so test for it.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (cpu_ops[cpu]->cpu_disable)
		return cpu_ops[cpu]->cpu_disable(cpu);

	return 0;
}
/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}
static int op_cpu_kill(unsigned int cpu)
{
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!cpu_ops[cpu]->cpu_kill)
		return 0;

	return cpu_ops[cpu]->cpu_kill(cpu);
}
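
/*
 * When the PSCI backend provides cpu_kill, it polls the firmware's
 * AFFINITY_INFO call until the core is reported OFF or a timeout
 * expires, which is what makes the "really dead" check meaningful.
 */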
/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%u may not have shut down cleanly: %d\n",
			cpu, err);
}
/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	(void)cpu_report_death();

	/*
	 * Actually shutdown the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	cpu_ops[cpu]->cpu_die(cpu);

	BUG();
}
#endif
/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
void cpu_die_early(void)
{
	int cpu = smp_processor_id();

	pr_crit("CPU%d: will not boot\n", cpu);

	/* Mark this CPU absent */
	set_cpu_present(cpu, 0);

#ifdef CONFIG_HOTPLUG_CPU
	update_cpu_boot_status(CPU_KILL_ME);
	/* Check if we can park ourselves */
	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
		cpu_ops[cpu]->cpu_die(cpu);
#endif

	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

	cpu_park_loop();
}
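
/*
 * Either way the failed CPU never returns: if it could not park itself
 * via cpu_die, the CPU_STUCK_IN_KERNEL status tells __cpu_up() to bump
 * cpus_stuck_in_kernel while the core settles into cpu_park_loop().
 */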
static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	apply_alternatives_all();
}
void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	/*
	 * Initialise the static keys early as they may be enabled by the
	 * cpufeature code.
	 */
	jump_label_init();
	cpuinfo_store_boot_cpu();
	save_boot_cpu_run_el();
	/*
	 * Run the errata workaround checks on the boot CPU, once we have
	 * initialised the cpu feature infrastructure from
	 * cpuinfo_store_boot_cpu() above.
	 */
	update_cpu_errata_workarounds();
}
static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;

	/*
	 * A cpu node with a missing "reg" property is
	 * considered invalid to build a cpu_logical_map
	 * entry.
	 */
	cell = of_get_property(dn, "reg", NULL);
	if (!cell) {
		pr_err("%s: missing reg property\n", dn->full_name);
		return INVALID_HWID;
	}

	hwid = of_read_number(cell, of_n_addr_cells(dn));
	/*
	 * Non affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK) {
		pr_err("%s: invalid reg property\n", dn->full_name);
		return INVALID_HWID;
	}
	return hwid;
}
/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}
/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
	if (cpu_read_ops(cpu))
		return -ENODEV;

	if (cpu_ops[cpu]->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}
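
/*
 * cpu_read_ops() resolves the cpu's enable method (the DT
 * "enable-method" property or its ACPI equivalent) into a cpu_ops
 * table; a cpu with no usable enable method never becomes possible.
 */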
static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;
#ifdef CONFIG_ACPI
/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}
		bootcpu_valid = true;
		early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	cpu_logical_map(cpu_count) = hwid;

	/*
	 * Set-up the ACPI parking protocol cpu entries
	 * while initializing the cpu_logical_map to
	 * avoid parsing MADT entries multiple times for
	 * nothing (ie a valid cpu_logical_map entry should
	 * contain a valid parking protocol data set to
	 * initialize the cpu if the parking protocol is
	 * the only available enable method).
	 */
	acpi_set_mailbox_entry(cpu_count, processor);

	early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count, hwid));

	cpu_count++;
}
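
/*
 * The mailbox recorded above is the per-cpu parking protocol page from
 * the GICC entry; a parked CPU is later started by writing its entry
 * point to that mailbox and sending it IPI_WAKEUP.
 */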
static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}
#else
#define acpi_table_parse_madt(...)	do { } while (0)
#endif
/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn = NULL;

	while ((dn = of_find_node_by_type(dn, "cpu"))) {
		u64 hwid = of_get_cpu_mpidr(dn);

		if (hwid == INVALID_HWID)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%s: duplicate cpu reg properties in the DT\n",
				dn->full_name);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%s: duplicate boot cpu reg property in DT\n",
					dn->full_name);
				goto next;
			}

			bootcpu_valid = true;
			early_map_cpu_to_node(0, of_node_to_nid(dn));

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu_count) = hwid;

		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
		cpu_count++;
	}
}
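
/*
 * Note that cpu_count keeps counting past NR_CPUS (the "next" label
 * only skips the map assignment), so smp_init_cpus() can report how
 * many cores the firmware described versus how many the kernel is
 * configured to support.
 */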
/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		/*
		 * do a walk of MADT to determine how many CPUs
		 * we have including disabled CPUs, and get information
		 * we need for SMP init
		 */
		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      acpi_parse_gic_cpu_interface, 0);

	if (cpu_count > nr_cpu_ids)
		pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
			cpu_count, nr_cpu_ids);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < nr_cpu_ids; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				cpu_logical_map(i) = INVALID_HWID;
		}
	}
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu;
	unsigned int this_cpu;

	init_cpu_topology();

	this_cpu = smp_processor_id();
	store_cpu_topology(this_cpu);
	numa_store_cpu_info(this_cpu);

	/*
	 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
	 * secondary CPUs present.
	 */
	if (max_cpus == 0)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 */
	for_each_possible_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		numa_store_cpu_info(cpu);
	}
}
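
/*
 * cpu_prepare gives the enable method a chance to do per-cpu setup
 * from the boot CPU: the spin-table method, for instance, publishes
 * the secondary entry point at the cpu-release-addr here, while PSCI
 * merely checks that the firmware implements CPU_ON.
 */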
void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
}
static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_WAKEUP, "CPU wake-up interrupts"),
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}
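
/*
 * __smp_cross_call is installed by the interrupt controller driver
 * (the GIC) via set_smp_cross_call() during its own initialisation;
 * until then no IPIs can be raised.
 */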
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, "      %s\n", ipi_types[i]);
	}
}
u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}
#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
#endif
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (__smp_cross_call)
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif
/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);

	local_irq_disable();

	while (1)
		cpu_relax();
}
/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}
void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state == SYSTEM_BOOTING ||
		    system_state == SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
			   cpumask_pr_args(cpu_online_mask));
}
/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int any_cpu = raw_smp_processor_id();

	if (cpu_ops[any_cpu]->cpu_die)
		return true;
#endif
	return false;
}
bool cpus_are_stuck_in_kernel(void)
{
	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());

	return !!cpus_stuck_in_kernel || smp_spin_tables;
}
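
/*
 * This is consulted by operations such as kexec, which must refuse to
 * proceed while secondary CPUs may still be executing kernel text that
 * is about to be overwritten.
 */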