/*
 *  linux/arch/x86-64/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Nov 2001 Dave Jones <davej@suse.de>
 *  Forked from i386 setup code.
 *
 *  This file handles the architecture-dependent parts of initialization
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <video/edid.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/bootsetup.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/sections.h>
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

char dmi_alloc_data[DMI_MAX_DATA];

struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);

struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];
struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_RAM,
};

struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_RAM,
};
#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
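/*
 * Illustrative note (editorial, not part of the original source): memparse()
 * accepts an optional K/M/G suffix, so a capture kernel booted with e.g.
 * "elfcorehdr=12345K" ends up with elfcorehdr_addr = 12345 * 1024.  Because
 * memparse() advances 'end' past the characters it consumed, "end > arg" is
 * true exactly when a number was actually given, which is what the return
 * value above checks.
 */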
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	reserve_bootmem(bootmap, bootmap_size);
}
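/*
 * Sizing note (illustrative, not from the original source): the bootmem
 * allocator keeps one bit per page frame below end_pfn, and
 * bootmem_bootmap_pages() returns how many pages that bitmap itself needs.
 * With 4GB of RAM, end_pfn = 0x100000 pages, so the bitmap is 1M bits =
 * 128KB = 32 pages, and find_e820_area() has to locate 128KB of free
 * physical memory in the BIOS-provided e820 map to hold it.
 */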
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/* copy_edd() - Copy the BIOS EDD information
 * from boot_params into a safe place. */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
	edd.mbr_signature_nr = EDD_MBR_SIG_NR;
	edd.edd_info_nr = EDD_NR;
}
#else
static inline void copy_edd(void)
{
}
#endif
#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;
static void discover_ebda(void)
{
	/* There is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E. */
	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
	ebda_addr <<= 4;

	ebda_size = *(unsigned short *)__va(ebda_addr);

	/* Round EBDA up to pages */
	if (ebda_size == 0)
		ebda_size = 1;
	ebda_size <<= 10;
	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
	if (ebda_size > 64*1024)
		ebda_size = 64*1024;
}
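/*
 * Worked example (illustrative, not from the original source): the word at
 * physical 0x40E in the BIOS data area holds a real-mode segment, so a
 * typical value of 0x9FC0 yields ebda_addr = 0x9FC0 << 4 = 0x9FC00.  The
 * first byte of the EBDA is its size in KB, hence the shift by 10 above;
 * the result is rounded up to whole pages and capped at 64KB before
 * setup_arch() reserves it with reserve_bootmem_generic().
 */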
void __init setup_arch(char **cmdline_p)
{
	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
	screen_info = SCREEN_INFO;
	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();
	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;

	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;
	parse_early_param();

	finish_e820_parsing();

	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;

	discover_ebda();
	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;
	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
	/* Parse SRAT to discover nodes. */
	acpi_numa_init();
#endif
#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif
	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	reserve_bootmem_generic(__pa_symbol(&_text),
				__pa_symbol(&_end) - __pa_symbol(&_text));
	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);

	/* reserve nodemap region */
	if (nodemap_addr)
		reserve_bootmem_generic(nodemap_addr, nodemap_size);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
#ifdef CONFIG_ACPI_SLEEP
	/* Reserve low memory region for sleep support. */
	acpi_reserve_bootmem();
#endif
	/* Find and reserve possible boot-time SMP configuration: */
	find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
			initrd_start = INITRD_START + PAGE_OFFSET;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       (unsigned long)(INITRD_START + INITRD_SIZE),
			       (unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end) {
		reserve_bootmem_generic(crashk_res.start,
			crashk_res.end - crashk_res.start + 1);
	}
#endif
	/* Set this early, so we don't allocate cpu0 if the MADT list
	   doesn't list the BSP first.
	   mpparse.c/MP_processor_info() allocates logical cpu numbers. */
	cpu_set(0, cpu_present_map);

	/* Read APIC and some other early information from ACPI tables. */
	acpi_boot_init();

	/* get boot-time SMP configuration: */
	if (smp_found_config)
		get_smp_config();

	init_apic_mappings();
	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources();
	e820_mark_nosave_regions();

	{
		unsigned i;
		/* request I/O space for devices used on all i[345]86 PCs */
		for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
			request_resource(&ioport_resource, &standard_io_resources[i]);
	}
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
}
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}

	if (n >= 0x80000007)
		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}
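/*
 * Decoding example for display_cacheinfo() (illustrative, not from the
 * original source): CPUID 0x80000005 reports the L1 caches, with the cache
 * size in KB in bits 31:24 and the line size in bits 7:0 of ECX (data) and
 * EDX (instruction).  CPUID 0x80000006 reports the L2 size in KB in ECX
 * bits 31:16 and the line size in bits 7:0, so an ECX value such as
 * 0x02004140 decodes to a 512KB L2 cache with 64-byte lines.
 */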
static int nearby_node(int apicid)
{
	int i;

	for (i = apicid - 1; i >= 0; i--) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
	unsigned ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;
	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);
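	/*
	 * Worked example (illustrative, not from the original source): a
	 * dual-core K8 returns ECX = 0x1 from CPUID 0x80000008, so
	 * x86_max_cores = 2.  If the core-id-bits field (ECX[15:12]) is 0,
	 * the loop above computes bits = 1.  An initial APIC ID of 5
	 * (binary 101) then gives cpu_core_id = 5 & 1 = 1, and phys_pkg_id()
	 * reduces the APIC ID to the socket ID, here 5 >> 1 = 2.
	 */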
	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU.
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to the
		     path for the previous case. */
		int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

	/* Enable workaround for FXSAVE leak */
	set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

	level = get_model_name(c);
	if (!level) {
		/* Should distinguish Models here, but this is only
		   a fallback anyways. */
		strcpy(c->x86_model_id, "Hammer");
	}
	display_cacheinfo(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	/* Fix cpuid4 emulation for more */
	num_cache_leaves = 3;

	/* RDTSC can be speculated around */
	clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
}
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {
		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
			       smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
					       ((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
	}
}
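/*
 * Worked example for detect_ht() (illustrative, not from the original
 * source): a hyperthreaded dual-core CPU reports EBX[23:16] = 4 logical
 * processors per package in CPUID leaf 1, so smp_num_siblings = 4 and
 * get_count_order(4) = 2 bits are stripped from the APIC ID to obtain the
 * physical package ID.  Dividing by x86_max_cores = 2 leaves 2 threads per
 * core, so index_msb = 1 and core_bits = 1, and the core ID is the APIC ID
 * shifted right by one and masked to a single bit.
 */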
/* find out the number of processor cores on the die */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;
	cpuid_count(4, 0, &eax, &t, &t, &t);
	return ((eax >> 26) + 1);
}
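/*
 * Illustrative note (not from the original source): in CPUID leaf 4
 * (deterministic cache parameters), EAX[31:26] holds the maximum number of
 * cores per physical package minus one, so a value of 1 in that field
 * means (eax >> 26) + 1 = 2 cores on the die.
 */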
static void srat_detect_node(void)
{
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
}
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_bit(X86_FEATURE_BTS, c->x86_capability);
		if (!(l1 & (1<<12)))
			set_bit(X86_FEATURE_PEBS, c->x86_capability);
	}

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
	if (c->x86 == 6)
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
	if (c->x86 == 15)
		set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
	else
		clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};
/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0';	/* Unset */
	c->x86_model_id[0] = '\0';	/* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;

		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

#ifdef CONFIG_SMP
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
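/*
 * Worked example for the CPUID 1 decoding above (illustrative, not from the
 * original source): a K8-era signature such as tfms = 0x00020f12 gives a
 * base family of (tfms >> 8) & 0xf = 0xf, a base model of (tfms >> 4) & 0xf
 * = 0x1 and a stepping of 0x2.  Because the base family is 0xf, the
 * extended family in bits 27:20 (here 0) is added and the extended model in
 * bits 19:16 (here 2) is prepended, yielding family 15, model 0x21,
 * stepping 2.
 */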
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;
	u32 xlvl;

	early_identify_cpu(c);

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;
	case X86_VENDOR_INTEL:
		init_intel(c);
		break;
	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	if (c == &boot_cpu_data)
		mtrr_bp_init();
	else
		mtrr_ap_init();
#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}
/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static char *x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
		"3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
		"constant_tsc", NULL, NULL,
		"up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
		"altmovcr8", "abm", "sse4a",
		"misalignsse", "3dnowprefetch",
		"osvw", "ibs", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	static char *x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		"tm",
		"stc",
		"100mhzsteps",
		"hwpstate",
		NULL,	/* tsc invariant mapped to constant_tsc */
		/* nothing */	/* constant_tsc - moved to flags */
	};
	if (!cpu_online(c-cpu_data))
		return 0;
	seq_printf(m, "processor\t: %u\n"
		   "vendor_id\t: %s\n"
		   "cpu family\t: %d\n"
		   "model\t\t: %d\n"
		   "model name\t: %s\n",
		   (unsigned)(c-cpu_data),
		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		   c->x86,
		   (int)c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");
	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");
	if (cpu_has(c, X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));

		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
#ifdef CONFIG_SMP
	if (smp_num_siblings * c->x86_max_cores > 1) {
		int cpu = c - cpu_data;

		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
#endif
1034 "fpu_exception\t: yes\n"
1035 "cpuid level\t: %d\n"
1042 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
1043 if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
1044 seq_printf(m, " %s", x86_cap_flags[i]);
1047 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
1048 c->loops_per_jiffy/(500000/HZ),
1049 (c->loops_per_jiffy/(5000/HZ)) % 100);
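	/*
	 * Arithmetic note (illustrative, not from the original source):
	 * BogoMIPS = loops_per_jiffy * HZ / 500000.  The two expressions
	 * above print the integer part and two fractional digits of that
	 * value; e.g. with HZ = 250 and loops_per_jiffy = 4,000,000 the
	 * line reads "bogomips : 2000.00".
	 */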
	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);
	seq_printf(m, "power management:");
	{
		unsigned i;
		for (i = 0; i < 32; i++)
			if (c->x86_power & (1 << i)) {
				if (i < ARRAY_SIZE(x86_power_flags) &&
				    x86_power_flags[i])
					seq_printf(m, "%s%s",
						   x86_power_flags[i][0] ? " " : "",
						   x86_power_flags[i]);
				else
					seq_printf(m, " [%d]", i);
			}
	}

	seq_printf(m, "\n\n");
	return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};