/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"
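/*
 * Tracks which vCPUs have had their initial context handed to Xen via
 * VCPUOP_initialise, so cpu_initialize_context() only does its work
 * once per CPU, even across hotplug remove/add cycles.
 */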
cpumask_var_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(int, xen_resched_irq);
static DEFINE_PER_CPU(int, xen_callfunc_irq);
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
static DEFINE_PER_CPU(int, xen_irq_work);
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}
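/*
 * First-stage bringup, run on the new vCPU itself once Xen starts it:
 * set up per-cpu state, record the (deliberately trivial) topology,
 * install the Xen clockevent device and mark the CPU online.
 */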
static void __cpuinit cpu_bringup(void)
{
	int cpu;

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	xen_enable_sysenter();
	xen_enable_syscall();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	notify_cpu_starting(cpu);

	ipi_call_lock();
	set_cpu_online(cpu, true);
	ipi_call_unlock();

	this_cpu_write(cpu_state, CPU_ONLINE);

	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
}
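/* Entry point for a freshly started vCPU: finish bringup, then idle. */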
static void __cpuinit cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_idle();
}
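/*
 * Bind the per-cpu IPI event channels for @cpu.  Each XEN_*_VECTOR is
 * backed by an IPI-type event channel that is delivered only to this
 * CPU and handled like an ordinary irq; on failure, whatever was
 * already bound is torn down again.
 */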
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
				    cpu,
				    xen_irq_work_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_irq_work, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(xen_resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	if (per_cpu(xen_debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
				       NULL);
	if (per_cpu(xen_irq_work, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);

	return rc;
}
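/*
 * For an unprivileged domain the possible map is simply every vCPU the
 * hypervisor knows about: the rc >= 0 test counts a vCPU that exists
 * but is still down.  dom0 is handled by xen_filter_cpu_maps() instead.
 */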
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	if (xen_initial_domain())
		return;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}
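/*
 * dom0 may be given fewer vCPUs (dom0_max_vcpus=) than the machine has
 * physical CPUs, so trim the possible and present maps down to the
 * vCPUs that Xen actually provides.
 */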
static void __init xen_filter_cpu_maps(void)
{
	int i, rc;

	if (!xen_initial_domain())
		return;

	num_processors = 0;
	disabled_cpus = 0;
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		} else {
			set_cpu_possible(i, false);
			set_cpu_present(i, false);
		}
	}
}
static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled */
	make_lowmem_page_readwrite(xen_initial_gdt);

	xen_filter_cpu_maps();
	xen_setup_vcpu_info_placement();
}
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	if (skip_ioapic_setup) {
		char *m = (max_cpus == 0) ?
			"The nosmp parameter is incompatible with Xen; " \
			"use Xen dom0_max_vcpus=1 parameter" :
			"The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}
	xen_init_lock_cpu(0);

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu (cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		set_cpu_present(cpu, true);
	}
}
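/*
 * Build the initial register and descriptor state for a secondary vCPU
 * and register it with Xen.  This replaces the native trampoline: the
 * new vCPU starts directly at cpu_bringup_and_idle() in kernel mode,
 * with segments, GDT frames, stack and cr3 taken from the context
 * filled in below.
 */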
static int __cpuinit
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents      = GDT_ENTRIES;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
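/*
 * Bring a secondary CPU up: prepare its per-cpu data, timer and lock
 * state, register its start context with Xen, then VCPUOP_up and spin
 * (yielding to the hypervisor) until cpu_bringup() reports CPU_ONLINE.
 */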
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

	per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}
static void xen_smp_cpus_done(unsigned int max_cpus)
{
}
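/*
 * CPU hotplug: a dying vCPU parks itself via VCPUOP_down in
 * xen_play_dead(), while xen_cpu_die(), on a surviving CPU, waits for
 * Xen to confirm the vCPU is down before releasing its resources.
 */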
#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}
static void xen_cpu_die(unsigned int cpu)
{
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	/* Also release the irq_work IPI that xen_smp_intr_init() bound,
	 * as the HVM teardown path below already does. */
	unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);

	if (num_online_cpus() == 1)
		alternatives_smp_switch(0);
}
static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
	/*
	 * Balance out the preempt calls - as we are running in cpu_idle
	 * loop which has been called at bootup from cpu_bringup_and_idle.
	 * cpu_bringup_and_idle called cpu_bringup, which made a
	 * preempt_disable(), so this preempt_enable() balances it out.
	 */
	preempt_enable();
}
#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_play_dead(void)
{
	BUG();
}

#endif
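/*
 * Shutdown/reboot path: smp_call_function() runs stop_self() on each
 * of the other CPUs, and every vCPU takes itself down with VCPUOP_down.
 */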
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	set_cpu_online(cpu, false);

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}
static void xen_stop_other_cpus(int wait)
{
	smp_call_function(stop_self, NULL, wait);
}
static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
static void __xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}
static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}
static void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
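/*
 * Translate a native x86 IPI vector number into the matching
 * XEN_*_VECTOR, or -1 if there is no event-channel equivalent.
 */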
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}
void xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}
void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}
void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}
void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();

	if (!(num_online_cpus() > 1))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		/* Callers pass an already-mapped Xen vector, so send it
		 * as-is rather than hardcoding a single IPI type. */
		xen_send_IPI_one(cpu, vector);
	}
}
void xen_send_IPI_allbutself(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
}
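/*
 * Receive side of the function-call and irq_work IPIs.  These mirror
 * the native vectors: wrap the generic handler in irq_enter()/irq_exit()
 * and account the interrupt.
 */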
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
	irq_enter();
	irq_work_run();
	inc_irq_stat(apic_irq_work_irqs);
	irq_exit();

	return IRQ_HANDLED;
}
static const struct smp_ops xen_smp_ops __initconst = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_cpu_up,
	.cpu_die = xen_cpu_die,
	.cpu_disable = xen_cpu_disable,
	.play_dead = xen_play_dead,

	.stop_other_cpus = xen_stop_other_cpus,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};
void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
	xen_init_spinlocks();
}
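/*
 * HVM guests with the vector-callback feature boot their CPUs through
 * the native path but still deliver IPIs as event channels, so only
 * the affected smp_ops hooks are overridden below.
 */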
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));

	xen_init_lock_cpu(0);
}
static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
{
	int rc;
	rc = native_cpu_up(cpu);
	WARN_ON(xen_smp_intr_init(cpu));
	return rc;
}
static void xen_hvm_cpu_die(unsigned int cpu)
{
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
	native_cpu_die(cpu);
}
void __init xen_hvm_smp_init(void)
{
	if (!xen_have_vector_callback)
		return;
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.cpu_up = xen_hvm_cpu_up;
	smp_ops.cpu_die = xen_hvm_cpu_die;
	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}