Merge branches 'x86/numa-fixes', 'x86/apic', 'x86/apm', 'x86/bitops', 'x86/build...
author Ingo Molnar <mingo@elte.hu>
Tue, 8 Jul 2008 07:16:56 +0000 (09:16 +0200)
committer Ingo Molnar <mingo@elte.hu>
Tue, 8 Jul 2008 07:16:56 +0000 (09:16 +0200)
21 files changed:
arch/x86/Kconfig
arch/x86/kernel/Makefile
arch/x86/kernel/apic_32.c
arch/x86/kernel/apic_64.c
arch/x86/kernel/head_64.S
arch/x86/kernel/io_apic_32.c
arch/x86/kernel/io_apic_64.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/setup_64.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/vmlinux_64.lds.S
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/ioremap.c
arch/x86/mm/pageattr.c
arch/x86/xen/mmu.c
fs/proc/proc_misc.c
include/asm-x86/apic.h
include/asm-x86/nmi.h
include/asm-x86/page.h
include/asm-x86/pgtable.h

                                  over full virtualization.  However, when run without a hypervisor
                                  the kernel is theoretically slower and slightly larger.
                        
+ +++   +  ++ +  ++++ + config PARAVIRT_CLOCK
+ +++   +  ++ +  ++++ +         bool
+ +++   +  ++ +  ++++ +         default n
+ +++   +  ++ +  ++++ + 
                        endif
                        
---------------- -------config MEMTEST_BOOTPARAM
---------------- -------        bool "Memtest boot parameter"
++++++++++++++++ +++++++config MEMTEST
++++++++++++++++ +++++++        bool "Memtest"
                                depends on X86_64
                                default y
                                help
Simple merge
Simple merge
Simple merge
Simple merge
@@@@@@@@@@@@@@@@@@@@@@@@@ -2129,8 -2120,9 -2129,8 -2129,8 -2129,8 -2130,7 -2129,7 -2129,7 -2129,8 -2129,7 -2129,7 -2129,8 -2129,7 -2129,7 -2129,8 -2129,7 -2129,7 -2129,7 -2129,7 -2129,8 -2129,7 -2129,7 -2129,8 -2129,7 +2121,9 @@@@@@@@@@@@@@@@@@@@@@@@@ static inline void __init unlock_ExtINT
                        static inline void __init check_timer(void)
                        {
                                int apic1, pin1, apic2, pin2;
+ ++++++++++++++++++++++        int no_pin1 = 0;
                                int vector;
     +++ ++ ++ ++++ ++ +        unsigned int ver;
                                unsigned long flags;
                        
                                local_irq_save(flags);
                                set_intr_gate(vector, interrupt[0]);
                        
                                /*
- ----------------------         * Subtle, code in do_timer_interrupt() expects an AEOI
- ----------------------         * mode for the 8259A whenever interrupts are routed
- ----------------------         * through I/O APICs.  Also IRQ0 has to be enabled in
- ----------------------         * the 8259A which implies the virtual wire has to be
- ---   -  -  -    -  -          * disabled in the local APIC.  Finally timer interrupts
- ---   -  -  -    -  -          * need to be acknowledged manually in the 8259A for
- ---   -  -  -    -  -          * timer_interrupt() and for the i82489DX when using
- ---   -  -  -    -  -          * the NMI watchdog.
     --- -- -- ---- -- -         * disabled in the local APIC.
+ ++++++++++++++++++++++         * As IRQ0 is to be enabled in the 8259A, the virtual
+ ++++++++++++++++++++++         * wire has to be disabled in the local APIC.  Also
+ ++++++++++++++++++++++         * timer interrupts need to be acknowledged manually in
+ ++++++++++++++++++++++         * the 8259A for the i82489DX when using the NMI
+ ++++++++++++++++++++++         * watchdog as that APIC treats NMIs as level-triggered.
+ ++++++++++++++++++++++         * The AEOI mode will finish them in the 8259A
+ ++++++++++++++++++++++         * automatically.
                                 */
                                apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
                                init_8259A(1);
- ---   -  -  -    -  -         timer_ack = !cpu_has_tsc;
- ---   -  -  -    -  -         timer_ack |= (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
- ---   -  -  -    -  -         if (timer_over_8254 > 0)
- ---   -  -  -    -  -                 enable_8259A_irq(0);
     --- -- -- ---- -- -        timer_ack = 1;
     --- -- -- ---- -- -        if (timer_over_8254 > 0)
     --- -- -- ---- -- -                enable_8259A_irq(0);
+ ++++++++++++++++++++++        timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
                        
                                pin1  = find_isa_irq_pin(0, mp_INT);
                                apic1 = find_isa_irq_apic(0, mp_INT);
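A note on the timer_ack change above: timer interrupts now need manual acknowledgement in the 8259A only when the NMI watchdog runs through the I/O APIC on a discrete i82489DX, i.e. a non-integrated local APIC. A minimal user-space sketch of the version test, assuming APIC_INTEGRATED as defined in the apicdef headers of this era (the sample version numbers are illustrative):

        #include <stdio.h>

        /* Integrated local APICs report version 0x1X; the discrete
           i82489DX reports 0x0X, so the high nibble distinguishes them. */
        #define APIC_INTEGRATED(x)      ((x) & 0xF0)

        int main(void)
        {
                unsigned int ver_82489dx = 0x04;        /* illustrative */
                unsigned int ver_integrated = 0x14;     /* illustrative */

                printf("82489DX integrated?  %d\n", !!APIC_INTEGRATED(ver_82489dx));
                printf("on-chip integrated?  %d\n", !!APIC_INTEGRATED(ver_integrated));
                return 0;
        }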
Simple merge
@@@@@@@@@@@@@@@@@@@@@@@@@ -405,8 -407,11 -405,8 -405,8 -403,8 -407,11 -407,11 -407,11 -403,8 -408,11 -407,11 -403,8 -407,11 -407,11 -405,8 -407,11 -407,11 -407,11 -407,11 -405,8 -407,11 -407,11 -405,8 -407,11 +408,11 @@@@@@@@@@@@@@@@@@@@@@@@@ dma_alloc_coherent(struct device *dev, 
                                   larger than 16MB and in this case we have a chance of
                                   finding fitting memory in the next higher zone first. If
                                   not retry with true GFP_DMA. -AK */
- --          -    -  -         if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
    -   -  -                    if (dma_mask <= DMA_32BIT_MASK)
+ +++   +  +  +    +  +         if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                                        gfp |= GFP_DMA32;
+ +++   +  +  +    +  +                 if (dma_mask < DMA_32BIT_MASK)
+ +++   +  +  +    +  +                         noretry = 1;
+ +++   +  +  +    +  +         }
                        #endif
                        
                         again:
                        #endif
                        }
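For readers following the dma_mask logic in the dma_alloc_coherent() hunk above: the change makes the GFP_DMA32 fallback conditional on the caller not having forced GFP_DMA, and disables the retry path when the mask is narrower than 32 bits. A standalone sketch of just that decision (the flag values and the pick_gfp helper are illustrative stand-ins, not the kernel's):

        #include <stdint.h>

        #define GFP_DMA         0x01u           /* illustrative values */
        #define GFP_DMA32       0x04u
        #define DMA_32BIT_MASK  0xffffffffULL

        /* Mirror of the zone-selection fallback in the hunk above: prefer
           ZONE_DMA32 for <=32-bit masks unless ZONE_DMA was requested, and
           don't bother retrying when even 32 bits exceeds the mask. */
        static unsigned int pick_gfp(uint64_t dma_mask, unsigned int gfp,
                                     int *noretry)
        {
                if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                        gfp |= GFP_DMA32;
                        if (dma_mask < DMA_32BIT_MASK)
                                *noretry = 1;
                }
                return gfp;
        }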
                        
------- ----------------/*
------- ---------------- * find out the number of processor cores on the die
------- ---------------- */
------- ----------------static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
------- ----------------{
------- ----------------        unsigned int eax, t;
------- ----------------
------- ----------------        if (c->cpuid_level < 4)
------- ----------------                return 1;
------- ----------------
------- ----------------        cpuid_count(4, 0, &eax, &t, &t, &t);
------- ----------------
------- ----------------        if (eax & 0x1f)
------- ----------------                return ((eax >> 26) + 1);
------- ----------------        else
------- ----------------                return 1;
------- ----------------}
------- ----------------
------- ----------------static void __cpuinit srat_detect_node(void)
------- ----------------{
------- ----------------#ifdef CONFIG_NUMA
------- ----------------        unsigned node;
------- ----------------        int cpu = smp_processor_id();
------- ----------------        int apicid = hard_smp_processor_id();
------- ----------------
------- ----------------        /* Don't do the funky fallback heuristics the AMD version employs
------- ----------------           for now. */
------- ----------------        node = apicid_to_node[apicid];
------- ----------------        if (node == NUMA_NO_NODE || !node_online(node))
------- ----------------                node = first_node(node_online_map);
------- ----------------        numa_set_node(cpu, node);
------- ----------------
------- ----------------        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
------- ----------------#endif
------- ----------------}
------- ----------------
------- ----------------static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
------- ----------------{
------- ----------------        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
------- ----------------            (c->x86 == 0x6 && c->x86_model >= 0x0e))
------- ----------------                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
------- ----------------}
------- ----------------
------- ----------------static void __cpuinit init_intel(struct cpuinfo_x86 *c)
+++++++ ++++++++++++++++static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
                        {
------- ----------------        /* Cache sizes */
------- ----------------        unsigned n;
------- ----------------
------- ----------------        init_intel_cacheinfo(c);
------- ----------------        if (c->cpuid_level > 9) {
------- ----------------                unsigned eax = cpuid_eax(10);
------- ----------------                /* Check for version and the number of counters */
------- ----------------                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
------- ----------------                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
     -                          }
     -                  
     -                          if (cpu_has_ds) {
     -                                  unsigned int l1, l2;
     -                                  rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
     -                                  if (!(l1 & (1<<11)))
     -                                          set_cpu_cap(c, X86_FEATURE_BTS);
     -                                  if (!(l1 & (1<<12)))
     -                                          set_cpu_cap(c, X86_FEATURE_PEBS);
+++++++ ++++++++++++++++        char *v = c->x86_vendor_id;
+++++++ ++++++++++++++++        int i;
+++++++ ++++++++++++++++        static int printed;
+++++++ ++++++++++++++++
+++++++ ++++++++++++++++        for (i = 0; i < X86_VENDOR_NUM; i++) {
+++++++ ++++++++++++++++                if (cpu_devs[i]) {
+++++++ ++++++++++++++++                        if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
+++++++ ++++++++++++++++                            (cpu_devs[i]->c_ident[1] &&
+++++++ ++++++++++++++++                            !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+++++++ ++++++++++++++++                                c->x86_vendor = i;
+++++++ ++++++++++++++++                                this_cpu = cpu_devs[i];
+++++++ ++++++++++++++++                                return;
+++++++ ++++++++++++++++                        }
+++++++ ++++++++++++++++                }
                                }
------- ----------------
----- - ----------------        if (cpu_has_ds) {
----- - ----------------                unsigned int l1, l2;
----- - ----------------                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
----- - ----------------                if (!(l1 & (1<<11)))
----- - ----------------                        set_cpu_cap(c, X86_FEATURE_BTS);
----- - ----------------                if (!(l1 & (1<<12)))
----- - ----------------                        set_cpu_cap(c, X86_FEATURE_PEBS);
     -                  
     -                          if (cpu_has_bts)
     -                                  ds_init_intel(c);
     -                  
     -                          n = c->extended_cpuid_level;
     -                          if (n >= 0x80000008) {
     -                                  unsigned eax = cpuid_eax(0x80000008);
     -                                  c->x86_virt_bits = (eax >> 8) & 0xff;
     -                                  c->x86_phys_bits = eax & 0xff;
     -                                  /* CPUID workaround for Intel 0F34 CPU */
     -                                  if (c->x86_vendor == X86_VENDOR_INTEL &&
     -                                      c->x86 == 0xF && c->x86_model == 0x3 &&
     -                                      c->x86_mask == 0x4)
     -                                          c->x86_phys_bits = 36;
+++++++ ++++++++++++++++        if (!printed) {
+++++++ ++++++++++++++++                printed++;
+++++++ ++++++++++++++++                printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
+++++++ ++++++++++++++++                printk(KERN_ERR "CPU: Your system may be unstable.\n");
                                }
------- ----------------
----- - ----------------
----- - ----------------        if (cpu_has_bts)
----- - ----------------                ds_init_intel(c);
----- - ----------------
----- - ----------------        n = c->extended_cpuid_level;
----- - ----------------        if (n >= 0x80000008) {
----- - ----------------                unsigned eax = cpuid_eax(0x80000008);
----- - ----------------                c->x86_virt_bits = (eax >> 8) & 0xff;
----- - ----------------                c->x86_phys_bits = eax & 0xff;
----- - ----------------                /* CPUID workaround for Intel 0F34 CPU */
----- - ----------------                if (c->x86_vendor == X86_VENDOR_INTEL &&
----- - ----------------                    c->x86 == 0xF && c->x86_model == 0x3 &&
----- - ----------------                    c->x86_mask == 0x4)
----- - ----------------                        c->x86_phys_bits = 36;
----- - ----------------        }
----- - ----------------
------- ----------------        if (c->x86 == 15)
------- ----------------                c->x86_cache_alignment = c->x86_clflush_size * 2;
------- ----------------        if (c->x86 == 6)
------- ----------------                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
------- ----------------        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
------- ----------------        c->x86_max_cores = intel_num_cpu_cores(c);
------- ----------------
------- ----------------        srat_detect_node();
------- ----------------}
------- ----------------
------- ----------------static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
------- ----------------{
------- ----------------        if (c->x86 == 0x6 && c->x86_model >= 0xf)
---- --  -- ------------                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
    -   -  -                            set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+++++++ ++++++++++++++++        c->x86_vendor = X86_VENDOR_UNKNOWN;
                        }
                        
------- ----------------static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
+++++++ ++++++++++++++++static void __init early_cpu_support_print(void)
                        {
------- ----------------        /* Cache sizes */
------- ----------------        unsigned n;
------- ----------------
------- ----------------        n = c->extended_cpuid_level;
------- ----------------        if (n >= 0x80000008) {
------- ----------------                unsigned eax = cpuid_eax(0x80000008);
------- ----------------                c->x86_virt_bits = (eax >> 8) & 0xff;
------- ----------------                c->x86_phys_bits = eax & 0xff;
------- ----------------        }
------- ----------------
------- ----------------        if (c->x86 == 0x6 && c->x86_model >= 0xf) {
------- ----------------                c->x86_cache_alignment = c->x86_clflush_size * 2;
------- ----------------                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
------- ----------------                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+++++++ ++++++++++++++++        int i,j;
+++++++ ++++++++++++++++        struct cpu_dev *cpu_devx;
+++++++ ++++++++++++++++
+++++++ ++++++++++++++++        printk("KERNEL supported cpus:\n");
+++++++ ++++++++++++++++        for (i = 0; i < X86_VENDOR_NUM; i++) {
+++++++ ++++++++++++++++                cpu_devx = cpu_devs[i];
+++++++ ++++++++++++++++                if (!cpu_devx)
+++++++ ++++++++++++++++                        continue;
+++++++ ++++++++++++++++                for (j = 0; j < 2; j++) {
+++++++ ++++++++++++++++                        if (!cpu_devx->c_ident[j])
+++++++ ++++++++++++++++                                continue;
+++++++ ++++++++++++++++                        printk("  %s %s\n", cpu_devx->c_vendor,
+++++++ ++++++++++++++++                                cpu_devx->c_ident[j]);
+++++++ ++++++++++++++++                }
                                }
------- ----------------        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
                        }
                        
------- ----------------static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
+++++++ ++++++++++++++++static void __init early_cpu_init(void)
                        {
------- ----------------        char *v = c->x86_vendor_id;
+++++++ ++++++++++++++++        struct cpu_vendor_dev *cvdev;
                        
------- ----------------        if (!strcmp(v, "AuthenticAMD"))
------- ----------------                c->x86_vendor = X86_VENDOR_AMD;
------- ----------------        else if (!strcmp(v, "GenuineIntel"))
------- ----------------                c->x86_vendor = X86_VENDOR_INTEL;
------- ----------------        else if (!strcmp(v, "CentaurHauls"))
------- ----------------                c->x86_vendor = X86_VENDOR_CENTAUR;
------- ----------------        else
------- ----------------                c->x86_vendor = X86_VENDOR_UNKNOWN;
+++++++ ++++++++++++++++        for (cvdev = __x86cpuvendor_start ;
+++++++ ++++++++++++++++             cvdev < __x86cpuvendor_end   ;
+++++++ ++++++++++++++++             cvdev++)
+++++++ ++++++++++++++++                cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
+++++++ ++++++++++++++++        early_cpu_support_print();
                        }
                        
                        /* Do some early cpuid on the boot CPU to get some parameter that are
Simple merge
Simple merge
Simple merge
@@@@@@@@@@@@@@@@@@@@@@@@@ -525,8 -525,9 -525,8 -525,8 -525,8 -526,8 -532,9 -525,9 -525,8 -525,8 -525,8 -525,8 -525,8 -525,8 -525,8 -525,8 -526,8 -525,8 -525,8 -525,8 -525,8 -525,8 -525,8 -525,8 +534,9 @@@@@@@@@@@@@@@@@@@@@@@@@ static void __init early_memtest(unsign
                                                if (t_start + t_size > end)
                                                        t_size = end - t_start;
                        
- ---   -  -  -    -  -                         printk(KERN_CONT "\n  %016lx - %016lx pattern %d",
- ---   -  -  -    -  -                                 t_start, t_start + t_size, pattern);
+ +++   +  +  +    +  +                         printk(KERN_CONT "\n  %016llx - %016llx pattern %d",
     -   -- -- ---- -- -                                t_start, t_start + t_size, pattern);
+ ++++  ++++++++++++++++                                (unsigned long long)t_start,
+ ++++  ++++++++++++++++                                (unsigned long long)t_start + t_size, pattern);
                        
                                                memtest(t_start, t_size, pattern);
                        
Simple merge
Simple merge
                                        preempt_enable();
                        }
                        
- ---   -  -- -  ---- - pteval_t xen_pte_val(pte_t pte)
+++++++++++++++++++++++ pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 +   +++ ++  + ++    +  {
- ---   -  -- -  ---- -         pteval_t ret = pte.pte;
+++++++++++++++++++++++         /* Just return the pte as-is.  We preserve the bits on commit */
+++++++++++++++++++++++         return *ptep;
+++++++++++++++++++++++ }
+++++++++++++++++++++++ 
+++++++++++++++++++++++ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+++++++++++++++++++++++                                  pte_t *ptep, pte_t pte)
+++++++++++++++++++++++ {
+++++++++++++++++++++++         struct mmu_update u;
+++++++++++++++++++++++ 
+++++++++++++++++++++++         xen_mc_batch();
 +   +++ ++  + ++    +  
- ---   -  -- -  ---- -         if (ret & _PAGE_PRESENT)
- ---   -  -- -  ---- -                 ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
+++++++++++++++++++++++         u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
+++++++++++++++++++++++         u.val = pte_val_ma(pte);
+++++++++++++++++++++++         extend_mmu_update(&u);
 +   +++ ++  + ++    +  
- ---   -  -- -  ---- -         return ret;
+++++++++++++++++++++++         xen_mc_issue(PARAVIRT_LAZY_MMU);
 +   +++ ++  + ++    +  }
 +   +++ ++  + ++    +  
- ---   -  -- -  ---- - pgdval_t xen_pgd_val(pgd_t pgd)
+ +++   +  ++ +  ++++ + /* Assume pteval_t is equivalent to all the other *val_t types. */
+ +++   +  ++ +  ++++ + static pteval_t pte_mfn_to_pfn(pteval_t val)
                        {
- ---   -  -- -  ---- -         pgdval_t ret = pgd.pgd;
- ---   -  -- -  ---- -         if (ret & _PAGE_PRESENT)
- ---   -  -- -  ---- -                 ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
- ---   -  -- -  ---- -         return ret;
+ +++   +  ++ +  ++++ +         if (val & _PAGE_PRESENT) {
+ +++   +  ++ +  ++++ +                 unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
+ +++   +  ++ +  ++++ +                 pteval_t flags = val & ~PTE_MASK;
     -   --  - --    - -                val = (mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
+ ++++  ++++++++++++++++                val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
+ +++   +  ++ +  ++++ +         }
+ +++   +  ++ +  ++++ + 
+ +++   +  ++ +  ++++ +         return val;
                        }
                        
- ---   -  -- -  ---- - pte_t xen_make_pte(pteval_t pte)
+ +++   +  ++ +  ++++ + static pteval_t pte_pfn_to_mfn(pteval_t val)
                        {
- ---   -  -- -  ---- -         if (pte & _PAGE_PRESENT) {
- ---   -  -- -  ---- -                 pte = phys_to_machine(XPADDR(pte)).maddr;
- ---   -  -- -  ---- -                 pte &= ~(_PAGE_PCD | _PAGE_PWT);
+ +++   +  ++ +  ++++ +         if (val & _PAGE_PRESENT) {
+ +++   +  ++ +  ++++ +                 unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
+ +++   +  ++ +  ++++ +                 pteval_t flags = val & ~PTE_MASK;
     -   --  - --    - -                val = (pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
+ ++++  ++++++++++++++++                val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
                                }
                        
- ---   -  -- -  ---- -         return (pte_t){ .pte = pte };
+ +++   +  ++ +  ++++ +         return val;
                        }
                        
- ---   -  -- -  ---- - pgd_t xen_make_pgd(pgdval_t pgd)
+ +++   +  ++ +  ++++ + pteval_t xen_pte_val(pte_t pte)
                        {
- ---   -  -- -  ---- -         if (pgd & _PAGE_PRESENT)
- ---   -  -- -  ---- -                 pgd = phys_to_machine(XPADDR(pgd)).maddr;
+ +++   +  ++ +  ++++ +         return pte_mfn_to_pfn(pte.pte);
+ +++   +  ++ +  ++++ + }
                        
- ---   -  -- -  ---- -         return (pgd_t){ pgd };
+ +++   +  ++ +  ++++ + pgdval_t xen_pgd_val(pgd_t pgd)
+ +++   +  ++ +  ++++ + {
+ +++   +  ++ +  ++++ +         return pte_mfn_to_pfn(pgd.pgd);
+ +++   +  ++ +  ++++ + }
+ +++   +  ++ +  ++++ + 
+ +++   +  ++ +  ++++ + pte_t xen_make_pte(pteval_t pte)
+ +++   +  ++ +  ++++ + {
+ +++   +  ++ +  ++++ +         pte = pte_pfn_to_mfn(pte);
+ +++   +  ++ +  ++++ +         return native_make_pte(pte);
+ +++   +  ++ +  ++++ + }
+ +++   +  ++ +  ++++ + 
+ +++   +  ++ +  ++++ + pgd_t xen_make_pgd(pgdval_t pgd)
+ +++   +  ++ +  ++++ + {
+ +++   +  ++ +  ++++ +         pgd = pte_pfn_to_mfn(pgd);
+ +++   +  ++ +  ++++ +         return native_make_pgd(pgd);
                        }
                        
                        pmdval_t xen_pmd_val(pmd_t pmd)
                        {
- ---   -  -- -  ---- -         pmdval_t ret = native_pmd_val(pmd);
- ---   -  -- -  ---- -         if (ret & _PAGE_PRESENT)
- ---   -  -- -  ---- -                 ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
- ---   -  -- -  ---- -         return ret;
+ +++   +  ++ +  ++++ +         return pte_mfn_to_pfn(pmd.pmd);
                        }
- ---   -  -- -  ---- - #ifdef CONFIG_X86_PAE
- ---   -  -- -  ---- - void xen_set_pud(pud_t *ptr, pud_t val)
+ +++   +  ++ +  ++++ + 
 -   --- --  - --    -  void xen_set_pud(pud_t *ptr, pud_t val)
+++++++++++++++++++++++ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
                        {
-----------------------         struct multicall_space mcs;
-----------------------         struct mmu_update *u;
+++++++++++++++++++++++         struct mmu_update u;
                        
                                preempt_disable();
                        
                                        xen_mc_batch();
                                }
                        
- ---   -  -- -  ---- - #ifdef CONFIG_X86_PAE
- ---   -  -- -  ---- -         level = MMUEXT_PIN_L3_TABLE;
- ---   -  -- -  ---- - #else
- ---   -  -- -  ---- -         level = MMUEXT_PIN_L2_TABLE;
- ---   -  -- -  ---- - #endif
+ +++   +  ++ +  ++++ +         xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
+ +++   +  ++ +  ++++ +         xen_mc_issue(0);
+ +++   +  ++ +  ++++ + }
+ +++   +  ++ +  ++++ + 
+++++++++++++++++++++++ /*
+++++++++++++++++++++++  * On save, we need to pin all pagetables to make sure they get their
+++++++++++++++++++++++  * mfns turned into pfns.  Search the list for any unpinned pgds and pin
+++++++++++++++++++++++  * them (unpinned pgds are not currently in use, probably because the
+++++++++++++++++++++++  * process is under construction or destruction).
+++++++++++++++++++++++  */
+++++++++++++++++++++++ void xen_mm_pin_all(void)
+++++++++++++++++++++++ {
+++++++++++++++++++++++         unsigned long flags;
+++++++++++++++++++++++         struct page *page;
 +   +++ ++  + ++    +  
- ---   -  -- -  ---- -         xen_do_pin(level, PFN_DOWN(__pa(pgd)));
+++++++++++++++++++++++         spin_lock_irqsave(&pgd_lock, flags);
 +   +++ ++  + ++    +  
- ---   -  -- -  ---- -         xen_mc_issue(0);
+++++++++++++++++++++++         list_for_each_entry(page, &pgd_list, lru) {
+++++++++++++++++++++++                 if (!PagePinned(page)) {
+++++++++++++++++++++++                         xen_pgd_pin((pgd_t *)page_address(page));
+++++++++++++++++++++++                         SetPageSavePinned(page);
+++++++++++++++++++++++                 }
+++++++++++++++++++++++         }
+++++++++++++++++++++++ 
+++++++++++++++++++++++         spin_unlock_irqrestore(&pgd_lock, flags);
 +   +++ ++  + ++    +  }
 +   +++ ++  + ++    +  
                        /* The init_mm pagetable is really pinned as soon as its created, but
                           that's before we have page structures to store the bits.  So do all
                           the book-keeping now. */
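One subtlety in the pte_mfn_to_pfn()/pte_pfn_to_mfn() hunks above is the added (pteval_t) cast: on 32-bit PAE, mfn_to_pfn() and pfn_to_mfn() return an unsigned long, so shifting the result by PAGE_SHIFT in 32-bit arithmetic silently drops frame numbers that map above 4GB. A user-space demonstration of the truncation, assuming only a toy frame number (this is not Xen code):

        #include <stdio.h>
        #include <stdint.h>

        #define PAGE_SHIFT 12

        int main(void)
        {
                uint32_t mfn = 0x00180000;      /* a frame above the 4GB line */

                uint64_t bad  = mfn << PAGE_SHIFT;           /* 32-bit shift truncates */
                uint64_t good = (uint64_t)mfn << PAGE_SHIFT; /* widen first, as the cast does */

                printf("bad  = %#llx\n", (unsigned long long)bad);   /* 0x80000000 */
                printf("good = %#llx\n", (unsigned long long)good);  /* 0x180000000 */
                return 0;
        }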
Simple merge
@@@@@@@@@@@@@@@@@@@@@@@@@ -36,14 -36,13 -36,14 -36,14 -36,14 -36,14 -36,14 -36,11 -36,14 -36,14 -36,14 -36,14 -36,14 -36,14 -36,14 -36,14 -36,14 -36,14 -36,14 -36,14 -36,14 -36,14 -36,14 -36,14 +36,10 @@@@@@@@@@@@@@@@@@@@@@@@@ extern void generic_apic_probe(void)
                        #ifdef CONFIG_X86_LOCAL_APIC
                        
                        extern int apic_verbosity;
- ----------------------extern int timer_over_8254;
                        extern int local_apic_timer_c2_ok;
------- ----------------extern int local_apic_timer_disabled;
                        
------- ----------------extern int apic_runs_main_timer;
                        extern int ioapic_force;
                        extern int disable_apic;
------- ----------------extern int disable_apic_timer;
                        
                        /*
                         * Basic functions accessing APICs.
Simple merge
Simple merge
                        #define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |    \
                                                 _PAGE_DIRTY)
                        
    -   -  -            #define _PAGE_CHG_MASK  (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+   +   +  +            /* Set of bits not changed in pte_modify */
    +   +  +            #define _PAGE_CHG_MASK  (PTE_MASK | _PAGE_PCD | _PAGE_PWT |             \
    +   +  +                                     _PAGE_ACCESSED | _PAGE_DIRTY)
                        
                        #define _PAGE_CACHE_MASK        (_PAGE_PCD | _PAGE_PWT)
                        #define _PAGE_CACHE_WB          (0)
@@@@@@@@@@@@@@@@@@@@@@@@@ -295,16 -296,16 -296,16 -296,16 -294,7 -296,16 -296,16 -296,16 -294,7 -296,16 -296,16 -294,7 -296,16 -296,16 -296,16 -296,16 -296,16 -296,16 -291,16 -296,16 -296,16 -296,16 -296,16 -296,16 +291,16 @@@@@@@@@@@@@@@@@@@@@@@@@ static inline pte_t pte_modify(pte_t pt
                                return __pte(val);
                        }
                        
    -   -  -            #define pte_pgprot(x) __pgprot(pte_val(x) & (0xfff | _PAGE_NX))
    +   +  +            /* mprotect needs to preserve PAT bits when updating vm_page_prot */
    +   +  +            #define pgprot_modify pgprot_modify
    +   +  +            static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
    +   +  +            {
    +   +  +                    pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
    +   +  +                    pgprotval_t addbits = pgprot_val(newprot);
    +   +  +                    return __pgprot(preservebits | addbits);
    +   +  +            }
    +   +  +            
-                       #define pte_pgprot(x) __pgprot(pte_val(x) & (0xfff | _PAGE_NX))
 --- --- -- ----------- #define pte_pgprot(x) __pgprot(pte_val(x) & ~PTE_MASK)
+++++++++++++++++++++++ #define pte_pgprot(x) __pgprot(pte_flags(x) & ~PTE_MASK)
                        
                        #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
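Finally, to make the pgprot_modify() addition in pgtable.h concrete: it merges a new protection value into an old one while keeping the bits in _PAGE_CHG_MASK (the page frame plus the PCD/PWT cache-control and accessed/dirty bits) from the original, which is how mprotect() preserves PAT attributes. A toy sketch with illustrative bit positions, not the real _PAGE_* constants:

        #include <stdio.h>
        #include <stdint.h>

        typedef uint64_t pgprotval_t;

        #define _PAGE_PCD       0x010ULL        /* illustrative positions */
        #define _PAGE_PWT       0x008ULL
        #define _PAGE_RW        0x002ULL
        #define _PAGE_CHG_MASK  (_PAGE_PCD | _PAGE_PWT)  /* cache bits only, for the demo */

        /* Same shape as the new pgprot_modify(): preserve _PAGE_CHG_MASK
           bits from oldprot, take everything else from newprot. */
        static pgprotval_t pgprot_modify(pgprotval_t oldprot, pgprotval_t newprot)
        {
                return (oldprot & _PAGE_CHG_MASK) | newprot;
        }

        int main(void)
        {
                pgprotval_t old = _PAGE_PCD | _PAGE_PWT;   /* uncached mapping */
                pgprotval_t new = _PAGE_RW;                /* mprotect adds write */

                /* Cache attributes survive the protection change: prints 0x1a */
                printf("%#llx\n", (unsigned long long)pgprot_modify(old, new));
                return 0;
        }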