/*
 *  linux/arch/x86-64/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Nov 2001 Dave Jones <davej@suse.de>
 *  Forked from i386 setup code.
 *
 *  This file handles the architecture-dependent parts of initialization.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <video/edid.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/bootsetup.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/sections.h>
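/*
 * Machine setup..
 */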
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

char dmi_alloc_data[DMI_MAX_DATA];

struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);

struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];
struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_RAM,
};
struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_RAM,
};
#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
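/*
 * Bootmem setup for the flat (non-NUMA) case: bootmem_bootmap_pages()
 * sizes a bitmap with one bit per 4K page up to end_pfn, find_e820_area()
 * finds a free physical range to hold it, and the map itself is then
 * reserved so it is never handed out as free memory.
 */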
#ifndef CONFIG_NUMA
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	reserve_bootmem(bootmap, bootmap_size);
}
#endif
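/*
 * BIOS Enhanced Disk Drive (EDD) data is collected by the real-mode boot
 * code into boot_params; copy it out early, before that area is recycled.
 */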
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/*
 * copy_edd() - Copy the BIOS EDD information
 * from boot_params into a safe place.
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
	edd.mbr_signature_nr = EDD_MBR_SIG_NR;
	edd.edd_info_nr = EDD_NR;
}
#else
static inline void copy_edd(void)
{
}
#endif
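/*
 * The word at physical 0x40E in the BIOS data area holds the real-mode
 * segment of the Extended BIOS Data Area (EBDA); shifting it left by 4
 * gives its physical address, and its first byte encodes the size in KiB.
 * The region is reserved later in setup_arch() so it is not reused.
 */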
#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

static void discover_ebda(void)
{
	/*
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E
	 */
	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
	ebda_addr <<= 4;

	ebda_size = *(unsigned short *)__va(ebda_addr);

	/* Round EBDA up to pages */
	if (ebda_size == 0)
		ebda_size = 1;
	ebda_size <<= 10;
	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
	if (ebda_size > 64*1024)
		ebda_size = 64*1024;
}
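/*
 * setup_arch() is called from start_kernel(): it takes over the boot
 * parameters, builds the e820-based memory map and direct mapping,
 * initializes bootmem, reserves firmware/kernel regions, and parses the
 * ACPI and MP tables needed to bring up the other CPUs.
 */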
void __init setup_arch(char **cmdline_p)
{
	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
	screen_info = SCREEN_INFO;
	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;
#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;
	init_mm.pgd = __va(__pa_symbol(&init_level4_pgt));

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext)-1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata)-1;

	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	finish_e820_parsing();
	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;

	discover_ebda();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();
#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);
	reserve_bootmem_generic(__pa_symbol(&_text),
				__pa_symbol(&_end) - __pa_symbol(&_text));
	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);

	/* reserve nodemap region */
	if (nodemap_addr)
		reserve_bootmem_generic(nodemap_addr, nodemap_size);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
			initrd_start = INITRD_START + PAGE_OFFSET;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       (unsigned long)(INITRD_START + INITRD_SIZE),
			       (unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end) {
		reserve_bootmem_generic(crashk_res.start,
			crashk_res.end - crashk_res.start + 1);
	}
#endif
	/*
	 * set this early, so we don't allocate cpu0
	 * if MADT list doesn't list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();

	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources();
	e820_mark_nosave_regions();
	{
		unsigned i;
		/* request I/O space for devices used on all i[345]86 PCs */
		for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
			request_resource(&ioport_resource, &standard_io_resources[i]);
	}

#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
}
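/*
 * CPUID leaves 0x80000002-0x80000004 return the 48-character processor
 * brand string, twelve 32-bit registers' worth, which is copied verbatim
 * into c->x86_model_id below.
 */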
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}
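/*
 * CPUID leaves 0x80000005/0x80000006 (AMD-style) describe the L1/L2
 * caches and TLBs: the top byte of ECX/EDX is the cache size in KB and
 * the low byte the line size; leaf 0x80000008 reports the physical and
 * virtual address widths.
 */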
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}

	if (n >= 0x80000007)
		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}
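/*
 * nearby_node(): fallback when an APIC id has no node assigned in
 * apicid_to_node[]; scan neighbouring APIC ids in both directions and
 * return the first online node found.
 */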
static int nearby_node(int apicid)
{
	int i;

	for (i = apicid - 1; i >= 0; i--) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;
	int node = 0;
	int cpu = smp_processor_id();
	unsigned apicid = hard_smp_processor_id();
	unsigned ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);

	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU.
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to the
		     path for the previous case. */
		int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

	level = get_model_name(c);
	if (!level) {
		/* Should distinguish Models here, but this is only
		   a fallback anyways. */
		strcpy(c->x86_model_id, "Hammer");
	}
	display_cacheinfo(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	/* Fix cpuid4 emulation for more */
	num_cache_leaves = 3;

	/* RDTSC can be speculated around */
	clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
}
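/*
 * detect_ht(): CPUID leaf 1 EBX[23:16] gives the number of logical
 * processors per package; phys_pkg_id() then splits the APIC id into
 * package, core and thread fields using get_count_order() of those counts.
 */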
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {
		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
			       smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
				 ((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
	}
}
/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;
	cpuid_count(4, 0, &eax, &t, &t, &t);
	return (eax & 0x1f) ? ((eax >> 26) + 1) : 1;
}
static void srat_detect_node(void)
{
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
}
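/*
 * init_intel(): architectural perfmon is probed via CPUID leaf 0xA,
 * BTS/PEBS availability via MSR_IA32_MISC_ENABLE, and the address widths
 * via CPUID 0x80000008, mirroring the AMD path above.
 */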
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_bit(X86_FEATURE_BTS, c->x86_capability);
		if (!(l1 & (1<<12)))
			set_bit(X86_FEATURE_PEBS, c->x86_capability);
	}

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
	if (c->x86 == 6)
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
	if (c->x86 == 15)
		set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
	else
		clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};
/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

#ifdef CONFIG_SMP
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
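/*
 * Note on the CPUID signature decoded above: EAX of leaf 1 packs
 * stepping[3:0], model[7:4], family[11:8], extended model[19:16] and
 * extended family[27:20]; the extended fields are only folded in for the
 * newer families, as implemented in early_identify_cpu().
 */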
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;
	u32 xlvl;

	early_identify_cpu(c);

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	c->apicid = phys_pkg_id(0);
	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;
	case X86_VENDOR_INTEL:
		init_intel(c);
		break;
	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0 ; i < NCAPINTS ; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	if (c == &boot_cpu_data)
		mtrr_bp_init();
	else
		mtrr_ap_init();
	numa_add_cpu(smp_processor_id());
}
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}
/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static char *x86_cap_flags[] = {
		/* Intel-defined */
928 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
929 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
930 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
931 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
934 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
935 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
936 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
937 NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
		"constant_tsc", NULL, NULL,
		"up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
		"altmovcr8", "abm", "sse4a",
		"misalignsse", "3dnowprefetch",
		"osvw", "ibs", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	static char *x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		"tm",
		"stc",
		"100mhzsteps",
		"hwpstate",
		"",	/* tsc invariant mapped to constant_tsc */
	};
#ifdef CONFIG_SMP
	if (!cpu_online(c-cpu_data))
		return 0;
#endif

	seq_printf(m, "processor\t: %u\n"
		   "vendor_id\t: %s\n"
		   "cpu family\t: %d\n"
		   "model\t\t: %d\n"
		   "model name\t: %s\n",
		   (unsigned)(c-cpu_data),
		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		   c->x86,
		   (int)c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if (cpu_has(c, X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

	if (smp_num_siblings * c->x86_max_cores > 1) {
		int cpu = c - cpu_data;
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
1032 "fpu_exception\t: yes\n"
1033 "cpuid level\t: %d\n"
1040 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
1041 if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
1042 seq_printf(m, " %s", x86_cap_flags[i]);
1045 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
1046 c->loops_per_jiffy/(500000/HZ),
1047 (c->loops_per_jiffy/(5000/HZ)) % 100);
	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);
	seq_printf(m, "power management:");
	{
		unsigned i;
		for (i = 0; i < 32; i++)
			if (c->x86_power & (1 << i)) {
				if (i < ARRAY_SIZE(x86_power_flags) &&
				    x86_power_flags[i])
					seq_printf(m, "%s%s",
						   x86_power_flags[i][0] ? " " : "",
						   x86_power_flags[i]);
				else
					seq_printf(m, " [%d]", i);
			}
	}

	seq_printf(m, "\n\n");

	return 0;
}
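/*
 * seq_file iterator for /proc/cpuinfo: c_start()/c_next() walk cpu_data[]
 * from *pos up to NR_CPUS, c_stop() has nothing to release, and
 * show_cpuinfo() formats one entry per iteration.
 */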
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
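/*
 * cpuinfo_op is referenced from the generic /proc code (in this kernel
 * generation, fs/proc/proc_misc.c) to implement /proc/cpuinfo; it is not
 * used elsewhere in this file.
 */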