From: Ingo Molnar
Date: Tue, 17 Feb 2009 11:07:00 +0000 (+0100)
Subject: Merge branches 'x86/acpi', 'x86/apic', 'x86/cpudetect', 'x86/headers', 'x86/paravirt...
X-Git-Tag: v2.6.30-rc3~72^2~31^2~2
X-Git-Url: http://ftp.safe.ca/?p=safe%2Fjmp%2Flinux-2.6;a=commitdiff_plain;h=494df596f9c315e20523894caa2a2938db3e5d8d

Merge branches 'x86/acpi', 'x86/apic', 'x86/cpudetect', 'x86/headers', 'x86/paravirt', 'x86/urgent' and 'x86/xen'; commit 'v2.6.29-rc5' into x86/core
---

494df596f9c315e20523894caa2a2938db3e5d8d
diff --cc Makefile
index 681c1d2,22d7584,c06e250,77a006d,77a006d,7715b2c,77a006d..b280cfc
--- a/Makefile
+++ b/Makefile
@@@@@@@@ -1,7 -1,7 -1,7 -1,7 -1,7 -1,7 -1,7 +1,7 @@@@@@@@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 29
- -- -EXTRAVERSION = -rc4
- EXTRAVERSION = -rc1
- EXTRAVERSION = -rc3
+ +++++EXTRAVERSION = -rc5
 NAME = Erotic Pickled Herring

 # *DOCUMENTATION*

diff --cc arch/x86/kernel/cpu/common.c
index 83492b1,cbcdb79,32093d0,e8f4a38,e8f4a38,83492b1,e8f4a38..4db150e
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@@@@@@@ -110,9 -121,9 -110,9 -122,10 -122,10 -110,9 -122,10 +122,10 @@@@@@@@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_
 	[GDT_ENTRY_APMBIOS_BASE+2]	= { { { 0x0000ffff, 0x00409200 } } },
 	[GDT_ENTRY_ESPFIX_SS]		= { { { 0x00000000, 0x00c09200 } } },
- - -	[GDT_ENTRY_PERCPU]		= { { { 0x00000000, 0x00000000 } } },
- - -} };
+ + +	[GDT_ENTRY_PERCPU]		= { { { 0x0000ffff, 0x00cf9200 } } },
+++ +	GDT_STACK_CANARY_INIT
 #endif
+ + +} };

 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

 #ifdef CONFIG_X86_32
@@@@@@@@ -213,6 -224,49 -213,49 -226,49 -226,49 -213,6 -226,49 +226,49 @@@@@@@@ static inline void squash_the_stupid_se
 #endif

 /*
+ + * Some CPU features depend on higher CPUID levels, which may not always
+ + * be available due to CPUID level capping or broken virtualization
+ + * software.  Add those features to this table to auto-disable them.
+ + */
+ +struct cpuid_dependent_feature {
+ +	u32 feature;
+ +	u32 level;
+ +};
+ +static const struct cpuid_dependent_feature __cpuinitconst
+ +cpuid_dependent_features[] = {
+ +	{ X86_FEATURE_MWAIT,	0x00000005 },
+ +	{ X86_FEATURE_DCA,	0x00000009 },
+ +	{ X86_FEATURE_XSAVE,	0x0000000d },
+ +	{ 0, 0 }
+ +};
+ +
+ +static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+ +{
+ +	const struct cpuid_dependent_feature *df;
+ +	for (df = cpuid_dependent_features; df->feature; df++) {
+ +		/*
+ +		 * Note: cpuid_level is set to -1 if unavailable, but
+ +		 * extended_extended_level is set to 0 if unavailable
+ +		 * and the legitimate extended levels are all negative
+ +		 * when signed; hence the weird messing around with
+ +		 * signs here...
+ +		 */
+ +		if (cpu_has(c, df->feature) &&
- -- -		    ((s32)df->feature < 0 ?
- -- -		     (u32)df->feature > (u32)c->extended_cpuid_level :
- -- -		     (s32)df->feature > (s32)c->cpuid_level)) {
++ ++++		    ((s32)df->level < 0 ?
++ ++++		     (u32)df->level > (u32)c->extended_cpuid_level :
++ ++++		     (s32)df->level > (s32)c->cpuid_level)) {
+ +			clear_cpu_cap(c, df->feature);
+ +			if (warn)
+ +				printk(KERN_WARNING
+ +				       "CPU: CPU feature %s disabled "
+ +				       "due to lack of CPUID level 0x%x\n",
+ +				       x86_cap_flags[df->feature],
+ +				       df->level);
+ +		}
+ +	}
- -- -}
++ ++++}
+ +
+ +/*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
  * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
@@@@@@@@ -242,9 -296,19 -285,9 -298,20 -298,20 -242,9 -298,20 +298,20 @@@@@@@@ static char __cpuinit *table_lookup_mod
 __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

+ + +void load_percpu_segment(int cpu)
+ + +{
+ + +#ifdef CONFIG_X86_32
+ + +	loadsegment(fs, __KERNEL_PERCPU);
+ + +#else
+ + +	loadsegment(gs, 0);
+ + +	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
+ + +#endif
+++ +	load_stack_canary_segment();
+ + +}
+ + +
 /* Current gdt points %fs at the "master" per-cpu area: after this,
  * it's on the real one. */
- - -void switch_to_new_gdt(void)
+ + +void switch_to_new_gdt(int cpu)
 {
 	struct desc_ptr gdt_descr;

@@@@@@@@ -395,13 -455,8 -438,13 -458,8 -458,8 -395,13 -458,8 +458,8 @@@@@@@@ void __cpuinit detect_ht(struct cpuinfo
 	core_bits = get_count_order(c->x86_max_cores);

- - -#ifdef CONFIG_X86_64
- - -	c->cpu_core_id = phys_pkg_id(index_msb) &
-				       ((1 << core_bits) - 1);
-#else
-	c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
+ + +	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
 				       ((1 << core_bits) - 1);
- -#else
- -	c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
- -				       ((1 << core_bits) - 1);
- - -#endif
 	}

 out:
@@@@@@@@ -877,54 -934,26 -922,54 -937,22 -937,22 -877,54 -937,22 +937,22 @@@@@@@@ static __init int setup_disablecpuid(ch
 __setup("clearcpuid=", setup_disablecpuid);

 #ifdef CONFIG_X86_64
- - -struct x8664_pda **_cpu_pda __read_mostly;
- - -EXPORT_SYMBOL(_cpu_pda);
- - -
 struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };

- - -static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
- -
- -void __cpuinit pda_init(int cpu)
- -{
- -	struct x8664_pda *pda = cpu_pda(cpu);
+ + +DEFINE_PER_CPU_FIRST(union irq_stack_union,
+ + +		     irq_stack_union) __aligned(PAGE_SIZE);
 -#ifdef CONFIG_SMP
 -DEFINE_PER_CPU(char *, irq_stack_ptr);	/* will be set during per cpu init */
 -#else
+ + +DEFINE_PER_CPU(char *, irq_stack_ptr) =
 -	per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 -#endif
+++ +	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
- -
- -	/* Setup up data that may be needed in __get_free_pages early */
- -	loadsegment(fs, 0);
- -	loadsegment(gs, 0);
- -	/* Memory clobbers used to order PDA accessed */
- -	mb();
- -	wrmsrl(MSR_GS_BASE, pda);
- -	mb();
- -
- -	pda->cpunumber = cpu;
- -	pda->irqcount = -1;
- -	pda->kernelstack = (unsigned long)stack_thread_info() -
- -				 PDA_STACKOFFSET + THREAD_SIZE;
- -	pda->active_mm = &init_mm;
- -	pda->mmu_state = 0;
- -
- -	if (cpu == 0) {
- -		/* others are initialized in smpboot.c */
- -		pda->pcurrent = &init_task;
- -		pda->irqstackptr = boot_cpu_stack;
- -		pda->irqstackptr += IRQSTACKSIZE - 64;
- -	} else {
- -		if (!pda->irqstackptr) {
- -			pda->irqstackptr = (char *)
- -				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
- -			if (!pda->irqstackptr)
- -				panic("cannot allocate irqstack for cpu %d",
- -				      cpu);
- -			pda->irqstackptr += IRQSTACKSIZE - 64;
- -		}
- -	}
-
-void __cpuinit pda_init(int cpu)
-{
-	struct x8664_pda *pda = cpu_pda(cpu);
-
-	/* Setup up data that may be needed in __get_free_pages early */
-	loadsegment(fs, 0);
-	loadsegment(gs, 0);
-	/* Memory clobbers used to order PDA accessed */
-	mb();
-	wrmsrl(MSR_GS_BASE, pda);
-	mb();
-
-	pda->cpunumber = cpu;
-	pda->irqcount = -1;
-	pda->kernelstack = (unsigned long)stack_thread_info() -
-		PDA_STACKOFFSET + THREAD_SIZE;
-	pda->active_mm = &init_mm;
-	pda->mmu_state = 0;
-
-	if (cpu == 0) {
-		/* others are initialized in smpboot.c */
-		pda->pcurrent = &init_task;
-		pda->irqstackptr = boot_cpu_stack;
-		pda->irqstackptr += IRQSTACKSIZE - 64;
-	} else {
-		if (!pda->irqstackptr) {
-			pda->irqstackptr = (char *)
-				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-			if (!pda->irqstackptr)
-				panic("cannot allocate irqstack for cpu %d",
-				      cpu);
-			pda->irqstackptr += IRQSTACKSIZE - 64;
-		}
+ + +DEFINE_PER_CPU(unsigned long, kernel_stack) =
+ + +	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+ + +EXPORT_PER_CPU_SYMBOL(kernel_stack);

- - -	if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
- - -		pda->nodenumber = cpu_to_node(cpu);
- - -	}
- - -}
+ + +DEFINE_PER_CPU(unsigned int, irq_count) = -1;

- - -static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
- - -				  DEBUG_STKSZ] __page_aligned_bss;
+ + +static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+ + +	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
+ + +	__aligned(PAGE_SIZE);

 extern asmlinkage void ignore_sysret(void);

@@@@@@@@ -957,9 -986,9 -1002,9 -985,13 -985,13 -957,9 -985,13 +985,13 @@@@@@@@ unsigned long kernel_eflags
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);

--- -#else
+++ +#else	/* x86_64 */
+ +
 --/* Make sure %fs is initialized properly in idle threads */
+++ +#ifdef CONFIG_CC_STACKPROTECTOR
+++ +DEFINE_PER_CPU(unsigned long, stack_canary);
+++ +#endif
++
- - /* Make sure %fs is initialized properly in idle threads */
+++ +/* Make sure %fs and %gs are initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
 	memset(regs, 0, sizeof(struct pt_regs));

diff --cc arch/x86/kernel/traps.c
index 98c2d055,0d032d2,98c2d055,bde57f0,bde57f0,a9e7548,bde57f0..acb8c05
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@@@@@@@ -906,16 -905,19 -906,16 -905,20 -905,20 -914,19 -905,20 +913,20 @@@@@@@@ void math_emulate(struct math_emu_info
 }
 #endif /* CONFIG_MATH_EMULATION */

- - dotraplinkage void __kprobes do_device_not_available(struct pt_regs regs)
+ + dotraplinkage void __kprobes
- - do_device_not_available(struct pt_regs *regs, long error)
+++ +do_device_not_available(struct pt_regs *regs, long error_code)
 {
 #ifdef CONFIG_X86_32
 	if (read_cr0() & X86_CR0_EM) {
+ +		struct math_emu_info info = { };
+ +
- -		conditional_sti(&regs);
+ +		conditional_sti(regs);
- -		math_emulate(0);
+ +
- -		info.regs = &regs;
+++ +		info.regs = regs;
+ +		math_emulate(&info);
 	} else {
 		math_state_restore(); /* interrupts still off */
- -		conditional_sti(&regs);
+ +		conditional_sti(regs);
 	}
 #else
 	math_state_restore();
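
The filter_cpuid_features() change merged in the common.c hunks above hinges on a sign trick, spelled out in its in-code comment: basic CPUID leaves (0x0000xxxx) are small positive values and compare signed against cpuid_level, which the kernel sets to -1 when CPUID is unavailable, while extended leaves (0x8000xxxx) are negative as s32 and therefore compare unsigned against extended_cpuid_level, which defaults to 0. Below is a minimal standalone sketch of that check, outside the kernel; level_supported() and the sample level values are illustrative, not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef int32_t s32;
typedef uint32_t u32;

/*
 * Hypothetical helper mirroring the level test in filter_cpuid_features(),
 * inverted to answer "is this CPUID leaf available?".  The kernel clears a
 * feature when the required level is *greater than* the available one.
 */
static bool level_supported(u32 level, s32 cpuid_level,
			    u32 extended_cpuid_level)
{
	if ((s32)level < 0)		/* 0x8000xxxx leaves are negative as s32 */
		return level <= extended_cpuid_level;	/* unsigned compare */
	return (s32)level <= cpuid_level;	/* signed; -1 rejects everything */
}

int main(void)
{
	/* Made-up CPU: basic leaves up to 0xb, extended leaves up to 0x80000008. */
	s32 basic = 0x0000000b;
	u32 extended = 0x80000008;

	printf("MWAIT (0x00000005): %d\n",	/* 1: 0x5 <= 0xb */
	       level_supported(0x00000005, basic, extended));
	printf("XSAVE (0x0000000d): %d\n",	/* 0: 0xd > 0xb, feature filtered */
	       level_supported(0x0000000d, basic, extended));
	printf("ext 0x80000008:     %d\n",	/* 1: unsigned 0x80000008 <= 0x80000008 */
	       level_supported(0x80000008, basic, extended));
	return 0;
}

Note how a capped cpuid_level (or -1 when CPUID is missing) makes every basic-leaf test fail under the signed compare, which is exactly the auto-disable behavior the cpuid_dependent_features table is there to provide.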