/*
 *  Copyright (C) 1995  Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/efi.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/vsyscall.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/dmi.h>
#include <asm/cacheflush.h>
#include <asm/mce.h>
#include <asm/ds.h>
#include <asm/topology.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define ARCH_SETUP
#endif
/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];
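
/*
 * Note: dmi_alloc_data is scratch storage handed out by the early DMI
 * code (dmi_alloc() in <asm/dmi.h>) while the firmware tables are
 * scanned, before the normal allocators are available.
 */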
/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);
extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];
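
/*
 * Legacy ISA I/O ports present on every PC-style machine.  They are
 * registered IORESOURCE_BUSY up front so that later driver probes
 * cannot claim the same ranges a second time.
 */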
struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
static struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource bss_resource = {
	.name = "Kernel bss",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
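
/*
 * Flat (non-NUMA) bootmem bring-up below: the bootmem bitmap needs one
 * bit per page below end_pfn, so the bitmap size is derived from
 * end_pfn, the bitmap itself is parked in a free e820 area, and that
 * area is finally reserved so it cannot be handed out again.
 */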
#ifndef CONFIG_NUMA
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	reserve_bootmem(bootmap, bootmap_size);
}
#endif
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif
#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
	unsigned long long free_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	free_mem =
		((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

	ret = parse_crashkernel(boot_command_line, free_mem,
			&crash_size, &crash_base);
	if (ret == 0 && crash_size) {
		if (crash_base > 0) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
					"for crashkernel (System RAM: %ldMB)\n",
					(unsigned long)(crash_size >> 20),
					(unsigned long)(crash_base >> 20),
					(unsigned long)(free_mem >> 20));
			crashk_res.start = crash_base;
			crashk_res.end   = crash_base + crash_size - 1;
			reserve_bootmem(crash_base, crash_size);
		} else
			printk(KERN_INFO "crashkernel reservation failed - "
					"you have to specify a base address\n");
	}
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif
/* Overridden in paravirt.c if CONFIG_PARAVIRT */
void __attribute__((weak)) __init memory_setup(void)
{
	machine_specific_memory_setup();
}
void __init setup_arch(char **cmdline_p)
{
	unsigned i;

	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL64", 4))
		efi_enabled = 1;
#endif

	ARCH_SETUP

	memory_setup();
	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;
	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;
	parse_early_param();

	finish_e820_parsing();

	early_gart_iommu_check();

	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;

	check_efer();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
	if (efi_enabled)
		efi_init();

	dmi_scan_machine();
#ifdef CONFIG_SMP
	/* setup to use the early static init tables during kernel startup */
	x86_cpu_to_apicid_early_ptr = (void *)&x86_cpu_to_apicid_init;
#ifdef CONFIG_NUMA
	x86_cpu_to_node_map_early_ptr = (void *)&x86_cpu_to_node_map_init;
#endif
	x86_bios_cpu_apicid_early_ptr = (void *)&x86_bios_cpu_apicid_init;
#endif
#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif
	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();
#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

	early_res_to_bootmem();
#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif

	if (efi_enabled)
		efi_reserve_bootmem();
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
		unsigned long end_of_mem    = end_pfn << PAGE_SHIFT;

		if (ramdisk_end <= end_of_mem) {
			reserve_bootmem_generic(ramdisk_image, ramdisk_size);
			initrd_start = ramdisk_image + PAGE_OFFSET;
			initrd_end = initrd_start+ramdisk_size;
		} else {
			/* Assumes everything on node 0 */
			free_bootmem(ramdisk_image, ramdisk_size);
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       ramdisk_end, end_of_mem);
			initrd_start = 0;
		}
	}
#endif
	reserve_crashkernel();
	paging_init();
	/*
	 * set this early, so we don't allocate cpu0
	 * if MADT list doesn't list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
	ioapic_init_mappings();
	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
	e820_mark_nosave_regions();

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
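
/*
 * Rough ordering of setup_arch() above: copy what we need out of
 * boot_params, parse the command line and the e820 map, size RAM and
 * build the direct mapping, bring up bootmem, reserve firmware/initrd/
 * crashkernel areas, then parse the ACPI/MP tables and register the
 * standard resources.
 */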
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}
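
/*
 * CPUID leaves 0x80000002-0x80000004 each return 16 bytes of the brand
 * string in EAX..EDX, which is why x86_model_id is filled in as twelve
 * 32-bit words and NUL-terminated at byte 48 above.
 */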
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
		       "D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}
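
/*
 * Decoding note for the leaves above (AMD layout): 0x80000005 reports
 * the L1 D/I cache sizes in KB in the top bytes of ECX/EDX and the
 * line size in the low bytes; 0x80000006 reports the L2 size in KB in
 * ECX[31:16] and the L2 TLB entry counts in EBX.
 */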
#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif
	bits = c->x86_coreid_bits;

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to
		     the path for the previous case. */

		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}
	c->x86_coreid_bits = bits;
#endif
}
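
/*
 * Example: CPUID 0x80000008 ECX = 0x2003 means four cores (low byte
 * 3 + 1) and a core-id width of two bits (ECX[15:12]), so APIC_ID & 3
 * selects the core within the package in amd_detect_cmp() above.
 */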
#define ENABLE_C1E_MASK			0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM			0x0ff00000
#define CPUID_XFAM_K8			0x00000000
#define CPUID_XFAM_10H			0x00100000
#define CPUID_XFAM_11H			0x00200000
#define CPUID_XMOD			0x000f0000
#define CPUID_XMOD_REV_F		0x00040000
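
/*
 * CPUID leaf 1 EAX carries the extended family in bits 27:20 and the
 * extended model in bits 19:16; e.g. family 10h parts read as
 * 0x00100Fxx, so (eax & CPUID_XFAM) matches CPUID_XFAM_10H below.
 */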
/* AMD systems with C1E don't have a working local APIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK)
			return 1;
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;

#ifdef CONFIG_SMP
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}
#endif

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, (unsigned long *)&c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
			     level >= 0x0f58))
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	level = get_model_name(c);
	if (!level) {
		switch (c->x86) {
		case 15:
			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	if (c->extended_cpuid_level >= 0x80000006 &&
	    (cpuid_edx(0x80000006) & 0xf000))
		num_cache_leaves = 4;
	else
		num_cache_leaves = 3;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

	/* MFENCE stops RDTSC speculation */
	set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);

	if (amd_apic_timer_broken())
		disable_apic_timer = 1;
}
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
			       "siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
					       ((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}
#endif
}
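
/*
 * Worked example for detect_ht(): on a 2-core/2-thread package CPUID
 * leaf 1 EBX[23:16] reports 4 siblings.  index_msb = 2 strips the whole
 * field to form the package ID; after dividing by x86_max_cores two
 * siblings (the threads) remain, and core_bits = 1 masks off the core
 * number within the package.
 */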
/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_count(4, 0, &eax, &t, &t, &t);

	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}
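
/*
 * Leaf 4 (deterministic cache parameters), subleaf 0: EAX[31:26] holds
 * the maximum core ID per package, hence (eax >> 26) + 1 cores;
 * EAX[4:0] == 0 means the leaf is unimplemented and one core is assumed.
 */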
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
}
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (cpu_has_bts)
		ds_init_intel(c);

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}
struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};
/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

#ifdef CONFIG_SMP
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	c->extended_cpuid_level = cpuid_eax(0x80000000);
	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		early_init_amd(c);
		break;
	}
}
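
/*
 * Note the split: early_identify_cpu() runs before check_bugs() and
 * fills in only what early boot needs (family/model, capability words,
 * clflush size); the vendor init_*() routines invoked from
 * identify_cpu() below do the heavyweight per-CPU setup.
 */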
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	early_identify_cpu(c);

	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	if (c != &boot_cpu_data)
		mtrr_ap_init();
#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		early_init_amd(c);
		break;
	case X86_VENDOR_INTEL:
		early_init_intel(c);
		break;
	}
}
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk(KERN_INFO "%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");
}
/*
 *	Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;
	int cpu = 0, i;
	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static const char *const x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
		"3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
		NULL, NULL, NULL, NULL,
		"constant_tsc", "up", NULL, "arch_perfmon",
		"pebs", "bts", NULL, "sync_rdtsc",
		"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		"ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", "extapic",
		"cr8_legacy", "abm", "sse4a", "misalignsse",
		"3dnowprefetch", "osvw", "ibs", "sse5",
		"skinit", "wdt", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Auxiliary (Linux-defined) */
		"ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
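	/*
	 * The strings above are indexed by capability bit number: entry i
	 * names bit i%32 of capability word i/32, matching cpu_has(c, i)
	 * in the flags loop further down.
	 */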
	static const char *const x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		"tm",
		"stc",
		"100mhzsteps",
		"hwpstate",
		"",	/* tsc invariant mapped to constant_tsc */
		/* nothing */
	};

#ifdef CONFIG_SMP
	cpu = c->cpu_index;
#endif

	seq_printf(m, "processor\t: %u\n"
		   "vendor_id\t: %s\n"
		   "cpu family\t: %d\n"
		   "model\t\t: %d\n"
		   "model name\t: %s\n",
		   (unsigned)cpu,
		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		   c->x86,
		   (int)c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");
	if (cpu_has(c, X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)cpu);

		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}
	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
#ifdef CONFIG_SMP
	if (smp_num_siblings * c->x86_max_cores > 1) {
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n",
			   cpus_weight(per_cpu(cpu_core_map, cpu)));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
#endif
1167 "fpu_exception\t: yes\n"
1168 "cpuid level\t: %d\n"
1173 for (i = 0; i < 32*NCAPINTS; i++)
1174 if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
1175 seq_printf(m, " %s", x86_cap_flags[i]);
	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);
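	/*
	 * loops_per_jiffy * HZ is delay-loop iterations per second;
	 * dividing by 500000 converts that to BogoMIPS, counting two
	 * instructions per loop iteration.  The second expression
	 * recovers the two fractional digits.
	 */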
	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);
	seq_printf(m, "power management:");
	for (i = 0; i < 32; i++) {
		if (c->x86_power & (1 << i)) {
			if (i < ARRAY_SIZE(x86_power_flags) &&
			    x86_power_flags[i])
				seq_printf(m, "%s%s",
					   x86_power_flags[i][0]?" ":"",
					   x86_power_flags[i]);
			else
				seq_printf(m, " [%d]", i);
		}
	}

	seq_printf(m, "\n\n");

	return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = first_cpu(cpu_online_map);
	if ((*pos) < NR_CPUS && cpu_online(*pos))
		return &cpu_data(*pos);
	return NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_online_map);
	return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};