/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file handles the architecture-dependent parts of initialization
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <video/edid.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

/* Early DMI memory */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];
struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
static struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource bss_resource = {
	.name = "Kernel bss",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
#ifdef CONFIG_PROC_VMCORE
/*
 * elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
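/*
 * Usage sketch (illustrative, not from this file): the kexec-loaded
 * capture kernel boots with something like "elfcorehdr=0x2f000000" on
 * its command line; memparse() also accepts K/M/G suffixes.
 */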
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	reserve_bootmem(bootmap, bootmap_size);
}
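/*
 * Note (bootmem arithmetic, standard behaviour): the bootmem map is one
 * bit per page, so for e.g. end_pfn = 2^20 pages (4GB of RAM) the bitmap
 * is 128KB, which bootmem_bootmap_pages() rounds up to whole pages.
 */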
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif
#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
	unsigned long long free_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	free_mem =
		((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

	ret = parse_crashkernel(boot_command_line, free_mem,
				&crash_size, &crash_base);
	if (ret == 0 && crash_size) {
		if (crash_base > 0) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
			       "for crashkernel (System RAM: %ldMB)\n",
			       (unsigned long)(crash_size >> 20),
			       (unsigned long)(crash_base >> 20),
			       (unsigned long)(free_mem >> 20));
			crashk_res.start = crash_base;
			crashk_res.end   = crash_base + crash_size - 1;
			reserve_bootmem(crash_base, crash_size);
		} else
			printk(KERN_INFO "crashkernel reservation failed - "
			       "you have to specify a base address\n");
	}
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif
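/*
 * Usage sketch (standard crashkernel= syntax, values illustrative):
 * "crashkernel=64M@16M" reserves 64MB at physical 16MB for the capture
 * kernel; a plain "crashkernel=64M" leaves crash_base at 0 and takes
 * the "you have to specify a base address" branch above.
 */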
#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

static void discover_ebda(void)
{
	/*
	 * There is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E.
	 */
	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
	/*
	 * There can be some situations, like paravirtualized guests,
	 * in which there is no available ebda information. In such
	 * case, just skip it.
	 */
	if (!ebda_addr) {
		ebda_size = 0;
		return;
	}

	ebda_addr <<= 4;	/* real-mode segment to physical address */

	ebda_size = *(unsigned short *)__va(ebda_addr);

	/* Round EBDA up to pages */
	if (ebda_size == 0)
		ebda_size = 1;
	ebda_size <<= 10;
	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
	if (ebda_size > 64*1024)
		ebda_size = 64*1024;
}
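/*
 * Worked example (illustrative values): a BDA word of 0x9FC0 at 0x40E
 * gives ebda_addr = 0x9FC0 << 4 = 0x9FC00; an EBDA length of 1KB becomes
 * ebda_size = 1 << 10 = 0x400, and round_up(0x400 + 0xC00, PAGE_SIZE)
 * reserves exactly one 4K page.
 */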
/* Overridden in paravirt.c if CONFIG_PARAVIRT */
void __attribute__((weak)) memory_setup(void)
{
	machine_specific_memory_setup();
}
void __init setup_arch(char **cmdline_p)
{
	unsigned i;

	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;

	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;
	finish_e820_parsing();

	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
#ifdef CONFIG_SMP
	/* setup to use the static apicid table during kernel startup */
	x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
#endif
#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif
	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();
#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif
	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	reserve_bootmem_generic(__pa_symbol(&_text),
				__pa_symbol(&_end) - __pa_symbol(&_text));

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);
#ifdef CONFIG_NUMA
	/* reserve nodemap region */
	if (nodemap_addr)
		reserve_bootmem_generic(nodemap_addr, nodemap_size);
#endif

#ifdef CONFIG_SMP
	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
#endif
#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
		unsigned long end_of_mem    = end_pfn << PAGE_SHIFT;

		if (ramdisk_end <= end_of_mem) {
			reserve_bootmem_generic(ramdisk_image, ramdisk_size);
			initrd_start = ramdisk_image + PAGE_OFFSET;
			initrd_end = initrd_start+ramdisk_size;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       ramdisk_end, end_of_mem);
			initrd_start = 0;
		}
	}
#endif
	reserve_crashkernel();

	/*
	 * set this early, so we don't allocate cpu0
	 * if MADT list doesn't list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
	ioapic_init_mappings();
	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
	e820_mark_nosave_regions();
	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
}
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
		       "D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}

	if (n >= 0x80000007)
		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}
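/*
 * Worked example (illustrative K8-style values): with CPUID 0x80000005
 * returning ECX = 0x40020140, the printk above reports an L1 D cache of
 * "64K (64 bytes/line)" from ECX[31:24] = 0x40 and ECX[7:0] = 0x40.
 */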
#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif
	bits = c->x86_coreid_bits;

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in.  Assume they
		 *   are all increased by a constant offset, but in the same
		 *   order as the HT nodeids.  If that doesn't result in a
		 *   usable node fall back to the path for the previous case.
		 */
		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
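/*
 * Worked example (illustrative values): with x86_coreid_bits == 1 (two
 * cores per socket) and an initial APIC id of 5 in phys_proc_id, the
 * code above yields cpu_core_id = 5 & 1 = 1 and, via phys_pkg_id(1),
 * socket id 5 >> 1 = 2.
 */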
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}
#define ENABLE_C1E_MASK			0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM			0x0ff00000
#define CPUID_XFAM_K8			0x00000000
#define CPUID_XFAM_10H			0x00100000
#define CPUID_XFAM_11H			0x00200000
#define CPUID_XMOD			0x000f0000
#define CPUID_XMOD_REV_F		0x00040000
/* AMD systems with C1E don't have a working local APIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK)
			return 1;
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}
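/*
 * Note (derived from the masks above): ENABLE_C1E_MASK tests bits 27-28
 * of the MSR's low word, and the K8 case deliberately falls through to
 * the 10H/11H check for revision F and later (CPUID_XMOD >= REV_F).
 */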
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;

#ifdef CONFIG_SMP
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}
#endif

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway.
	 */
	clear_bit(0*32+31, (unsigned long *)&c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
			     level >= 0x0f58))
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	level = get_model_name(c);
	if (!level) {
		switch (c->x86) {
		case 15:
			/* Should distinguish models here, but this is only
			   a fallback anyway. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	if (c->extended_cpuid_level >= 0x80000006 &&
	    (cpuid_edx(0x80000006) & 0xf000))
		num_cache_leaves = 4;
	else
		num_cache_leaves = 3;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

	/* RDTSC can be speculated around */
	clear_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);

	/* Family 10 doesn't support C states in MWAIT so don't use it */
	if (c->x86 == 0x10 && !force_mwait)
		clear_cpu_cap(c, X86_FEATURE_MWAIT);

	if (amd_apic_timer_broken())
		disable_apic_timer = 1;
}
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
			       "siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
			((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}
#endif
}
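/*
 * Worked example (illustrative values): CPUID.1:EBX[23:16] = 4 logical
 * CPUs per package with x86_max_cores = 2 gives smp_num_siblings =
 * 4/2 = 2 HT siblings per core; for APIC id 5, index_msb starts at 2,
 * so phys_proc_id = 5 >> 2 = 1 and cpu_core_id = (5 >> 1) & 1 = 0.
 */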
/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_count(4, 0, &eax, &t, &t, &t);

	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}
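/*
 * Worked example (illustrative): CPUID.(EAX=4,ECX=0) returning
 * EAX = 0x04000121 has a nonzero cache-type field (EAX[4:0] = 1) and
 * EAX[31:26] = 1, so the function reports 1 + 1 = 2 cores on the die.
 */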
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
	else
		clear_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}
struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};
/*
 * Do some early cpuid on the boot CPU to get the parameters that are
 * needed before check_bugs. Everything advanced is in identify_cpu
 * below.
 */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);
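	/*
	 * Note (standard CPUID behaviour): leaf 0 returns the vendor string
	 * in EBX, EDX, ECX order, which is why the destinations above are
	 * offsets 0, 8 and 4 - e.g. EBX="Genu", EDX="ineI", ECX="ntel"
	 * assembles to "GenuineIntel".
	 */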
	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;

		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

#ifdef CONFIG_SMP
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
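	/*
	 * Worked example (illustrative signature): tfms = 0x000f4a means
	 * base family 0xf, so x86 = 0xf + EAX[27:20] (here 0) = 15,
	 * x86_model = (EAX[19:16] << 4) | EAX[7:4] = 0x04, stepping = 0xa.
	 */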
	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		early_init_amd(c);
		break;
	}
}
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	early_identify_cpu(c);

	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	if (c != &boot_cpu_data)
		mtrr_ap_init();
#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk(KERN_INFO "%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");
}
/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;
	int cpu = 0, i;
	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned.  Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static const char *const x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
		"3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
		NULL, NULL, NULL, NULL,
		"constant_tsc", "up", NULL, "arch_perfmon",
		"pebs", "bts", NULL, "sync_rdtsc",
		"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		"ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", "extapic",
		"cr8_legacy", "abm", "sse4a", "misalignsse",
		"3dnowprefetch", "osvw", "ibs", "sse5",
		"skinit", "wdt", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Auxiliary (Linux-defined) */
		"ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	static const char *const x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		"tm",
		"stc",
		"100mhzsteps",
		"hwpstate",
		"",	/* tsc invariant mapped to constant_tsc */
		/* nothing */
	};
	seq_printf(m, "processor\t: %u\n"
		   "vendor_id\t: %s\n"
		   "cpu family\t: %d\n"
		   "model\t\t: %d\n"
		   "model name\t: %s\n",
		   (unsigned)cpu,
		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		   c->x86,
		   c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");
	if (cpu_has(c, X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)cpu);

		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
#ifdef CONFIG_SMP
	if (smp_num_siblings * c->x86_max_cores > 1) {
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n",
			   cpus_weight(per_cpu(cpu_core_map, cpu)));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
#endif
1188 "fpu_exception\t: yes\n"
1189 "cpuid level\t: %d\n"
1194 for (i = 0; i < 32*NCAPINTS; i++)
1195 if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
1196 seq_printf(m, " %s", x86_cap_flags[i]);
	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);
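	/*
	 * Note (arithmetic behind the constants): loops_per_jiffy/(500000/HZ)
	 * equals lpj*HZ/500000, i.e. BogoMIPS = 2*lpj*HZ/10^6; the second
	 * term supplies the two fractional digits.
	 */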
	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);
	seq_printf(m, "power management:");
	for (i = 0; i < 32; i++) {
		if (c->x86_power & (1 << i)) {
			if (i < ARRAY_SIZE(x86_power_flags) &&
			    x86_power_flags[i])
				seq_printf(m, "%s%s",
					   x86_power_flags[i][0]?" ":"",
					   x86_power_flags[i]);
			else
				seq_printf(m, " [%d]", i);
		}
	}

	seq_printf(m, "\n\n");

	return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = first_cpu(cpu_online_map);
	if ((*pos) < NR_CPUS && cpu_online(*pos))
		return &cpu_data(*pos);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_online_map);
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};
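/*
 * Note (standard seq_file protocol): a read of /proc/cpuinfo calls
 * c_start(), then show_cpuinfo()/c_next() for each online CPU, then
 * c_stop(); c_start() checks cpu_online(*pos), so a CPU that went
 * offline between reads is skipped rather than returned stale.
 */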