From: Ingo Molnar
Date: Mon, 1 Jun 2009 19:06:21 +0000 (+0200)
Subject: Merge branch 'linus' into irq/numa
X-Git-Tag: v2.6.31-rc1~423^2~4
X-Git-Url: http://ftp.safe.ca/?a=commitdiff_plain;h=3d58f48ba05caed9118bce62b3047f8683438835;hp=-c;p=safe%2Fjmp%2Flinux-2.6

Merge branch 'linus' into irq/numa

Conflicts:
	arch/mips/sibyte/bcm1480/irq.c
	arch/mips/sibyte/sb1250/irq.c

Merge reason: we gathered a few conflicts plus update to latest upstream fixes.

Signed-off-by: Ingo Molnar
---

3d58f48ba05caed9118bce62b3047f8683438835
diff --combined Documentation/kernel-parameters.txt
index e455938,fd5cac0..11648c1
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@@ -1535,6 -1535,10 +1535,10 @@@ and is between 256 and 4096 characters
  			register save and restore. The kernel will only save
  			legacy floating-point registers on task switch.
  
+ 	noxsave		[BUGS=X86] Disables x86 extended register state save
+ 			and restore using xsave. The kernel will fallback to
+ 			enabling legacy floating-point and sse state.
+ 
  	nohlt		[BUGS=ARM,SH] Tells the kernel that the sleep(SH) or
  			wfi(ARM) instruction doesn't work correctly and not to
  			use it. This is also useful when using JTAG debugger.
@@@ -1571,9 -1575,6 +1575,9 @@@
  	noinitrd	[RAM] Tells the kernel not to load any configured
  			initial RAM disk.
  
 +	nointremap	[X86-64, Intel-IOMMU] Do not enable interrupt
 +			remapping.
 +
  	nointroute	[IA-64]
  
  	nojitter	[IA64] Disables jitter checking for ITC timers.
diff --combined arch/arm/common/gic.c
index 90f6b7f,3e1714c..664c7b8
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@@ -109,7 -109,7 +109,7 @@@ static void gic_unmask_irq(unsigned in
  }
  
  #ifdef CONFIG_SMP
 -static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
 +static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
  {
  	void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
  	unsigned int shift = (irq % 4) * 8;
@@@ -122,8 -122,6 +122,8 @@@
  	val |= 1 << (cpu + shift);
  	writel(val, reg);
  	spin_unlock(&irq_controller_lock);
 +
 +	return 0;
  }
  #endif
  
@@@ -255,9 -253,9 +255,9 @@@ void __cpuinit gic_cpu_init(unsigned in
  }
  
  #ifdef CONFIG_SMP
- void gic_raise_softirq(cpumask_t cpumask, unsigned int irq)
+ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
  {
- 	unsigned long map = *cpus_addr(cpumask);
+ 	unsigned long map = *cpus_addr(*mask);
  
  	/* this always happens on GIC0 */
  	writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
diff --combined arch/mips/sibyte/bcm1480/irq.c
index 4f256a1,c147c4b..690de06
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@@ -50,7 -50,7 +50,7 @@@ static void enable_bcm1480_irq(unsigne
  static void disable_bcm1480_irq(unsigned int irq);
  static void ack_bcm1480_irq(unsigned int irq);
  #ifdef CONFIG_SMP
 -static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask);
 +static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask);
  #endif
  
  #ifdef CONFIG_PCI
@@@ -109,17 -109,16 +109,16 @@@ void bcm1480_unmask_irq(int cpu, int ir
  }
  
  #ifdef CONFIG_SMP
 -static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
 +static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
  {
  	int i = 0, old_cpu, cpu, int_on, k;
  	u64 cur_ints;
- 	struct irq_desc *desc = irq_desc + irq;
  	unsigned long flags;
  	unsigned int irq_dirty;
  
  	if (cpumask_weight(mask) != 1) {
  		printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
 -		return;
 +		return -1;
  	}
  	i = cpumask_first(mask);
  
@@@ -127,8 -126,7 +126,7 @@@
  	cpu = cpu_logical_map(i);
  
  	/* Protect against other affinity changers and IMR manipulation */
- 	spin_lock_irqsave(&desc->lock, flags);
- 	spin_lock(&bcm1480_imr_lock);
+ 	spin_lock_irqsave(&bcm1480_imr_lock, flags);
  
  	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
  	old_cpu = bcm1480_irq_owner[irq];
@@@ -153,10 -151,7 +151,9 @@@
  			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) +
  				R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
  		}
  	}
- 	spin_unlock(&bcm1480_imr_lock);
- 	spin_unlock_irqrestore(&desc->lock, flags);
+ 	spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
 +
 +	return 0;
  }
  #endif
diff --combined arch/mips/sibyte/sb1250/irq.c
index e389507,38cb998..409dec7
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@@ -50,7 -50,7 +50,7 @@@ static void enable_sb1250_irq(unsigned
  static void disable_sb1250_irq(unsigned int irq);
  static void ack_sb1250_irq(unsigned int irq);
  #ifdef CONFIG_SMP
 -static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask);
 +static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask);
  #endif
  
  #ifdef CONFIG_SIBYTE_HAS_LDT
@@@ -103,26 -103,24 +103,24 @@@ void sb1250_unmask_irq(int cpu, int irq
  }
  
  #ifdef CONFIG_SMP
 -static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
 +static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
  {
  	int i = 0, old_cpu, cpu, int_on;
  	u64 cur_ints;
- 	struct irq_desc *desc = irq_desc + irq;
  	unsigned long flags;
  
  	i = cpumask_first(mask);
  
  	if (cpumask_weight(mask) > 1) {
  		printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
 -		return;
 +		return -1;
  	}
  
  	/* Convert logical CPU to physical CPU */
  	cpu = cpu_logical_map(i);
  
  	/* Protect against other affinity changers and IMR manipulation */
- 	spin_lock_irqsave(&desc->lock, flags);
- 	spin_lock(&sb1250_imr_lock);
+ 	spin_lock_irqsave(&sb1250_imr_lock, flags);
  
  	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
  	old_cpu = sb1250_irq_owner[irq];
@@@ -144,10 -142,7 +142,9 @@@
  		____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
  					R_IMR_INTERRUPT_MASK));
  	}
- 	spin_unlock(&sb1250_imr_lock);
- 	spin_unlock_irqrestore(&desc->lock, flags);
+ 	spin_unlock_irqrestore(&sb1250_imr_lock, flags);
 +
 +	return 0;
  }
  #endif
  
diff --combined arch/powerpc/sysdev/mpic.c
index f4cbd15,0efc12d..352d8c3
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@@ -807,7 -807,7 +807,7 @@@ static void mpic_end_ipi(unsigned int i
  
  #endif /* CONFIG_SMP */
  
 -void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 +int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
  {
  	struct mpic *mpic = mpic_from_irq(irq);
  	unsigned int src = mpic_irq_to_hw(irq);
@@@ -824,8 -824,6 +824,8 @@@
  		mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
  			       mpic_physmask(cpus_addr(tmp)[0]));
  	}
 +
 +	return 0;
  }
  
  static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
@@@ -1059,13 -1057,6 +1059,6 @@@ struct mpic * __init mpic_alloc(struct
  	memset(mpic, 0, sizeof(struct mpic));
  	mpic->name = name;
  
- 	mpic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
- 				       isu_size, &mpic_host_ops,
- 				       flags & MPIC_LARGE_VECTORS ? 2048 : 256);
- 	if (mpic->irqhost == NULL)
- 		return NULL;
- 
- 	mpic->irqhost->host_data = mpic;
  	mpic->hc_irq = mpic_irq_chip;
  	mpic->hc_irq.typename = name;
  	if (flags & MPIC_PRIMARY)
@@@ -1215,6 -1206,15 +1208,15 @@@
  	mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
  	mpic->isu_mask = (1 << mpic->isu_shift) - 1;
  
+ 	mpic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
+ 				       isu_size ? isu_size : mpic->num_sources,
+ 				       &mpic_host_ops,
+ 				       flags & MPIC_LARGE_VECTORS ? 2048 : 256);
+ 	if (mpic->irqhost == NULL)
+ 		return NULL;
+ 
+ 	mpic->irqhost->host_data = mpic;
+ 
  	/* Display version */
  	switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) {
  	case 1:
diff --combined arch/x86/Kconfig
index e03485b,a6efe0a..b1d3f60
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@@ -274,9 -274,15 +274,9 @@@ config SPARSE_IR
  
  	  If you don't know what to do here, say N.
  
 -config NUMA_MIGRATE_IRQ_DESC
 -	bool "Move irq desc when changing irq smp_affinity"
 +config NUMA_IRQ_DESC
 +	def_bool y
  	depends on SPARSE_IRQ && NUMA
 -	depends on BROKEN
 -	default n
 -	---help---
 -	  This enables moving irq_desc to cpu/node that irq will use handled.
 -
 -	  If you don't know what to do here, say N.
  
  config X86_MPPARSE
  	bool "Enable MPS table" if ACPI
@@@ -349,7 -355,7 +349,7 @@@ config X86_U
  	depends on X86_64
  	depends on X86_EXTENDED_PLATFORM
  	depends on NUMA
- 	select X86_X2APIC
+ 	depends on X86_X2APIC
  	---help---
  	  This option is needed in order to support SGI Ultraviolet systems.
  	  If you don't have one of these, you should say N here.
@@@ -492,6 -498,19 +492,19 @@@ config PARAVIR
  	  over full virtualization.  However, when run without a hypervisor
  	  the kernel is theoretically slower and slightly larger.
  
+ config PARAVIRT_SPINLOCKS
+ 	bool "Paravirtualization layer for spinlocks"
+ 	depends on PARAVIRT && SMP && EXPERIMENTAL
+ 	---help---
+ 	  Paravirtualized spinlocks allow a pvops backend to replace the
+ 	  spinlock implementation with something virtualization-friendly
+ 	  (for example, block the virtual CPU rather than spinning).
+ 
+ 	  Unfortunately the downside is an up to 5% performance hit on
+ 	  native kernels, with various workloads.
+ 
+ 	  If you are unsure how to answer this question, answer N.
+ 
  config PARAVIRT_CLOCK
  	bool
  	default n
diff --combined arch/x86/kernel/Makefile
index 16e3acf,88d1bfc..235f592
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@@ -28,7 -28,7 +28,7 @@@ CFLAGS_paravirt.o := $(nostackp
  obj-y			:= process_$(BITS).o signal.o entry_$(BITS).o
  obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
  obj-y			+= time_$(BITS).o ioport.o ldt.o dumpstack.o
 -obj-y			+= setup.o i8259.o irqinit_$(BITS).o
 +obj-y			+= setup.o i8259.o irqinit.o
  obj-$(CONFIG_X86_VISWS)	+= visws_quirks.o
  obj-$(CONFIG_X86_32)	+= probe_roms_32.o
  obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
@@@ -89,7 -89,8 +89,8 @@@ obj-$(CONFIG_DEBUG_NX_TEST)	+= test_nx.
  obj-$(CONFIG_VMI)		+= vmi_32.o vmiclock_32.o
  obj-$(CONFIG_KVM_GUEST)		+= kvm.o
  obj-$(CONFIG_KVM_CLOCK)		+= kvmclock.o
- obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o paravirt-spinlocks.o
+ obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o
+ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
  obj-$(CONFIG_PARAVIRT_CLOCK)	+= pvclock.o
  
  obj-$(CONFIG_PCSPKR_PLATFORM)	+= pcspeaker.o
diff --combined arch/x86/kernel/apic/es7000_32.c
index 8e07c14,3029477..69328ac
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@@ -145,7 -145,7 +145,7 @@@ es7000_rename_gsi(int ioapic, int gsi
  	return gsi;
  }
  
 -static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
 +static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
  {
  	unsigned long vect = 0, psaival = 0;
  
@@@ -254,7 -254,7 +254,7 @@@ static int parse_unisys_oem(char *oempt
  }
  
  #ifdef CONFIG_ACPI
- static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
+ static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
  {
  	struct acpi_table_header *header = NULL;
  	struct es7000_oem_table *table;
@@@ -285,7 -285,7 +285,7 @@@
  	return 0;
  }
  
- static void unmap_unisys_acpi_oem_table(unsigned long oem_addr)
+ static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
  {
  	if (!oem_addr)
  		return;
@@@ -306,7 -306,7 +306,7 @@@ static int es7000_check_dsdt(void
  static int es7000_acpi_ret;
  
  /* Hook from generic ACPI tables.c */
- static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+ static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
  {
  	unsigned long oem_addr = 0;
  	int check_dsdt;
@@@ -717,7 -717,7 +717,7 @@@ struct apic apic_es7000_cluster =
  	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,
  };
  
- struct apic apic_es7000 = {
+ struct apic __refdata apic_es7000 = {
  
  	.name				= "es7000",
  	.probe				= probe_es7000,
diff --combined arch/x86/kernel/cpu/common.c
index 017c600,77848d9..c2fa56a
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@@ -114,6 -114,13 +114,13 @@@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_
  } };
  EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
  
+ static int __init x86_xsave_setup(char *s)
+ {
+ 	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+ 	return 1;
+ }
+ __setup("noxsave", x86_xsave_setup);
+ 
  #ifdef CONFIG_X86_32
  static int cachesize_override __cpuinitdata = -1;
  static int disable_x86_serial_nr __cpuinitdata = 1;
@@@ -761,12 -768,6 +768,12 @@@ static void __cpuinit identify_cpu(stru
  	if (this_cpu->c_identify)
  		this_cpu->c_identify(c);
  
 +	/* Clear/Set all flags overriden by options, after probe */
 +	for (i = 0; i < NCAPINTS; i++) {
 +		c->x86_capability[i] &= ~cpu_caps_cleared[i];
 +		c->x86_capability[i] |= cpu_caps_set[i];
 +	}
 +
  #ifdef CONFIG_X86_64
  	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
  #endif
diff --combined drivers/pci/intel-iommu.c
index 9ce8f07,a563fbe..cd38916
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@@ -59,6 -59,10 +59,10 @@@
  #define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
  #define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
  
+ #ifndef PHYSICAL_PAGE_MASK
+ #define PHYSICAL_PAGE_MASK PAGE_MASK
+ #endif
+ 
  /* global iommu list, set NULL for ignored DMAR units */
  static struct intel_iommu **g_iommus;
  
@@@ -1216,7 -1220,7 +1220,7 @@@ static void dmar_init_reserved_ranges(v
  		if (!r->flags || !(r->flags & IORESOURCE_MEM))
  			continue;
  		addr = r->start;
- 		addr &= PAGE_MASK;
+ 		addr &= PHYSICAL_PAGE_MASK;
  		size = r->end - addr;
  		size = PAGE_ALIGN(size);
  		iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
@@@ -1968,6 -1972,15 +1972,6 @@@ static int __init init_dmars(void
  		}
  	}
  
 -#ifdef CONFIG_INTR_REMAP
 -	if (!intr_remapping_enabled) {
 -		ret = enable_intr_remapping(0);
 -		if (ret)
 -			printk(KERN_ERR
 -			       "IOMMU: enable interrupt remapping failed\n");
 -	}
 -#endif
 -
  	/*
  	 * For each rmrr
  	 *   for each dev attached to rmrr
@@@ -2164,7 -2177,8 +2168,8 @@@ static dma_addr_t __intel_map_single(st
  	 * is not a big problem
  	 */
  	ret = domain_page_mapping(domain, start_paddr,
- 				  ((u64)paddr) & PAGE_MASK, size, prot);
+ 				  ((u64)paddr) & PHYSICAL_PAGE_MASK,
+ 				  size, prot);
  	if (ret)
  		goto error;
  
@@@ -2454,8 -2468,8 +2459,8 @@@ static int intel_map_sg(struct device *
  		addr = page_to_phys(sg_page(sg)) + sg->offset;
  		size = aligned_size((u64)addr, sg->length);
  		ret = domain_page_mapping(domain, start_addr + offset,
- 					  ((u64)addr) & PAGE_MASK,
- 					  size, prot);
+ 					  ((u64)addr) & PHYSICAL_PAGE_MASK,
+ 					  size, prot);
  		if (ret) {
  			/* clear the page */
  			dma_pte_clear_range(domain, start_addr,