#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>

#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/numa.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
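
/*
 * Boot-time handshake, in terms of the masks above: the boot CPU marks
 * an AP in cpu_callout_map to release it into the startup sequence, and
 * the AP answers by setting itself in cpu_callin_map.  cpu_possible_map
 * is fixed once boot is done (see prefill_possible_map() below).
 */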

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
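
/*
 * Example: in a package with two HT cores (logical CPUs 0-3, threads
 * 0/1 on one core and 2/3 on the other), cpu_sibling_map of CPU 0 is
 * { 0, 1 } while its cpu_core_map is { 0, 1, 2, 3 }.
 */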

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* ready for x86_64; no harm on x86, since it is overwritten after allocation */
unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

#ifdef CONFIG_X86_32
/* Set if we find a B stepping CPU */
int __cpuinitdata smp_b_stepping;
#endif
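
/*
 * In the AMD checks below, names like "Athlon 660" encode the CPUID
 * family/model/stepping: 660 is family 6, model 6, stepping 0; 670 is
 * family 6, model 7, stepping 0; and so on.
 */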
static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		if (num_possible_cpus() == 1)
			goto valid_k7;

		/* Athlon 660/661 is valid. */
		if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
		    (c->x86_mask == 1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model == 7) && (c->x86_mask == 0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon >model 7 have capability
		 * bit. It's worth noting that the A5 stepping (662) of some
		 * Athlon XP's have the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
		 * more.
		 */
		if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
		    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
		     (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, not a certified SMP capable AMD system. */
		add_taint(TAINT_UNSAFE_SMP);
	}

valid_k7:
	;
#endif
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */
void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	if (id != 0)
		identify_secondary_cpu(c);
	smp_apply_quirks(c);
}

void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bringup a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
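
/*
 * Worked example for the booted_cores accounting above, on one package
 * with two HT cores (CPUs 0/1 on one core, 2/3 on the other), booted
 * in order: CPU 0 starts a new core, so its booted_cores becomes 1;
 * CPU 1 joins that core and inherits booted_cores from CPU 0; CPU 2
 * starts the second core, raising booted_cores to 2 on CPUs 0-2; and
 * CPU 3 again inherits the count from its sibling.
 */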

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For perf, we return last level cache shared map.
	 * And for power savings, we return cpu_core_map
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return per_cpu(cpu_core_map, cpu);
	else
		return c->llc_shared_map;
}
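
/*
 * Balancing across llc_shared_map domains favors performance (load is
 * spread over distinct last-level caches); balancing across the full
 * cpu_core_map when the power-saving tunables are set lets the
 * scheduler consolidate work onto fewer packages instead.
 */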

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
unsigned long __cpuinit setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data,
	       trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}
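
/*
 * The physical address returned above is the real-mode entry point: it
 * is where a secondary CPU starts executing when it receives the
 * startup IPI, which is why the trampoline must sit in low memory.
 */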

#ifdef CONFIG_X86_32
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
}
#endif
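
/*
 * The 0x9F000 bound keeps the whole trampoline page under the 640K
 * (0xA0000) real-mode ceiling: a page-aligned base passing the check
 * ends at 0x9F000 at the latest.
 */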

#ifdef CONFIG_HOTPLUG_CPU
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);

/*
 * cpu_possible_map should be static: it cannot change as CPUs are
 * onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, which don't expect to
 * do this dynamically on CPU arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * In case cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
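
/*
 * Example: with 2 processors enumerated at boot and 2 more marked
 * disabled in the BIOS tables, possible becomes 4 and the boot log
 * reads "SMP: Allowing 4 CPUs, 2 hotplug CPUs".
 */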

static void __ref remove_cpu_from_maps(int cpu)
{
	cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_X86_64
	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	/* was set by cpu_init() */
	clear_bit(cpu, (unsigned long *)&cpu_initialized);
	clear_node_cpumask(cpu);
#endif
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();

	/*
	 * HACK:
	 * Allow any queued timer interrupts to get serviced
	 * This is only a temporary solution until we cleanup
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	remove_cpu_from_maps(cpu);
	fixup_irqs(cpu_online_map);
	return 0;
}
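
/*
 * __cpu_disable() runs on the CPU going down, with interrupts off;
 * fixup_irqs() then retargets any interrupts still bound to this CPU
 * at the CPUs that remain online.
 */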

void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk(KERN_INFO "CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
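
/*
 * The loop above gives the dying CPU up to a second (10 x 100ms) to
 * declare itself CPU_DEAD from play_dead() before we give up on it.
 */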
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif

/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
	extern unsigned int maxcpus;

	maxcpus = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("maxcpus", parse_maxcpus);