x86: Remove move_cleanup_count from irq_cfg
[safe/jmp/linux-2.6] / arch/x86/kernel/head_64.S
index b07ac7b..780cd92 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
 #include <linux/linkage.h>
 #include <linux/threads.h>
 #include <linux/init.h>
-#include <asm/desc.h>
 #include <asm/segment.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/msr.h>
 #include <asm/cache.h>
 #include <asm/processor-flags.h>
+#include <asm/percpu.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -40,7 +40,7 @@ L4_START_KERNEL = pgd_index(__START_KERNEL_map)
 L3_START_KERNEL = pud_index(__START_KERNEL_map)
 
        .text
-       .section .text.head
+       __HEAD
        .code64
        .globl startup_64
 startup_64:
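
Note: __HEAD is the generic helper from include/linux/init.h for the kernel's startup-text section, replacing the open-coded .text.head directive removed above. In trees of this vintage it should expand to roughly a plain section directive (sketch, verify against the tree this diff targets):

    /* include/linux/init.h (sketch) */
    #define __HEAD  .section ".head.text","ax"
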
@@ -110,7 +110,7 @@ startup_64:
        movq    %rdi, %rax
        shrq    $PMD_SHIFT, %rax
        andq    $(PTRS_PER_PMD - 1), %rax
-       leaq    __PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx
+       leaq    __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
        leaq    level2_spare_pgt(%rip), %rbx
        movq    %rdx, 0(%rbx, %rax, 8)
 ident_complete:
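
Note: the hunk above fills in one 2 MB identity-mapping PMD entry for the physical address the kernel was loaded at: %rax becomes the index into level2_spare_pgt, and %rdx becomes the entry value, i.e. the 2 MB-aligned address in %rdi plus the page flag bits added by the leaq. A minimal userspace sketch of that arithmetic, with hard-coded constants and an illustrative flags value standing in for __PAGE_KERNEL_IDENT_LARGE_EXEC (whose real value comes from the pgtable headers):

    #include <stdint.h>
    #include <stdio.h>

    #define PMD_SHIFT    21       /* 2 MB pages */
    #define PTRS_PER_PMD 512
    #define FLAGS 0xe3ULL         /* PRESENT|RW|ACCESSED|DIRTY|PSE -- illustration only */

    int main(void)
    {
        uint64_t phys = 0x1000000;  /* example load address, already 2 MB aligned */
        uint64_t idx  = (phys >> PMD_SHIFT) & (PTRS_PER_PMD - 1);  /* shrq + andq */
        uint64_t pmd  = phys + FLAGS;                              /* leaq FLAGS(%rdi) */

        printf("level2_spare_pgt[%llu] = 0x%llx\n",
               (unsigned long long)idx, (unsigned long long)pmd);
        return 0;
    }
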
@@ -226,12 +226,15 @@ ENTRY(secondary_startup_64)
        movl %eax,%fs
        movl %eax,%gs
 
-       /* 
-        * Setup up a dummy PDA. this is just for some early bootup code
-        * that does in_interrupt() 
-        */ 
+       /* Set up %gs.
+        *
+        * The base of %gs always points to the bottom of the irqstack
+        * union.  If the stack protector canary is enabled, it is
+        * located at %gs:40.  Note that, on SMP, the boot cpu uses
+        * init data section till per cpu areas are set up.
+        */
        movl    $MSR_GS_BASE,%ecx
-       movq    $empty_zero_page,%rax
+       movq    initial_gs(%rip),%rax
        movq    %rax,%rdx
        shrq    $32,%rdx
        wrmsr   
@@ -257,6 +260,8 @@ ENTRY(secondary_startup_64)
        .align  8
        ENTRY(initial_code)
        .quad   x86_64_start_kernel
+       ENTRY(initial_gs)
+       .quad   INIT_PER_CPU_VAR(irq_stack_union)
        __FINITDATA
 
        ENTRY(stack_start)
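
Note: the new comment and the initial_gs entry describe the per-cpu irq stack union: MSR_GS_BASE is pointed at the bottom of the boot CPU's copy (wrmsr takes the address split across %edx:%eax, hence the shrq $32), and with stack protector enabled GCC expects the canary at %gs:40. A compilable sketch of the layout that comment implies; the field names and IRQ_STACK_SIZE mirror the kernel's irq_stack_union but are assumptions here, not part of this patch:

    #include <assert.h>
    #include <stddef.h>

    #define IRQ_STACK_SIZE (16 * 1024)    /* assumed size, for illustration */

    union irq_stack_union {
        char irq_stack[IRQ_STACK_SIZE];
        struct {
            char gs_base[40];             /* padding so the canary lands at %gs:40 */
            unsigned long stack_canary;
        };
    };

    int main(void)
    {
        /* The property the boot code relies on: the canary sits exactly 40
         * bytes above the %gs base, which is the bottom of the irq stack. */
        assert(offsetof(union irq_stack_union, stack_canary) == 40);
        return 0;
    }
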
@@ -305,7 +310,7 @@ ENTRY(early_idt_handler)
        call dump_stack
 #ifdef CONFIG_KALLSYMS 
        leaq early_idt_ripmsg(%rip),%rdi
-       movq 8(%rsp),%rsi       # get rip again
+       movq 0(%rsp),%rsi       # get rip again
        call __print_symbol
 #endif
 #endif /* EARLY_PRINTK */
@@ -323,8 +328,6 @@ early_idt_ripmsg:
 #endif /* CONFIG_EARLY_PRINTK */
        .previous
 
-.balign PAGE_SIZE
-
 #define NEXT_PAGE(name) \
        .balign PAGE_SIZE; \
 ENTRY(name)
@@ -374,7 +377,7 @@ NEXT_PAGE(level2_ident_pgt)
        /* Since I easily can, map the first 1G.
         * Don't set NX because code runs from these pages.
         */
-       PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)
+       PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
 
 NEXT_PAGE(level2_kernel_pgt)
        /*
@@ -401,19 +404,21 @@ NEXT_PAGE(level2_spare_pgt)
        .globl early_gdt_descr
 early_gdt_descr:
        .word   GDT_ENTRIES*8-1
-       .quad   per_cpu__gdt_page
+early_gdt_descr_base:
+       .quad   INIT_PER_CPU_VAR(gdt_page)
 
 ENTRY(phys_base)
        /* This must match the first entry in level2_kernel_pgt */
        .quad   0x0000000000000000
 
+#include "../../x86/xen/xen-head.S"
        
        .section .bss, "aw", @nobits
        .align L1_CACHE_BYTES
 ENTRY(idt_table)
-       .skip 256 * 16
+       .skip IDT_ENTRIES * 16
 
-       .section .bss.page_aligned, "aw", @nobits
+       __PAGE_ALIGNED_BSS
        .align PAGE_SIZE
 ENTRY(empty_zero_page)
        .skip PAGE_SIZE
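
Note: both initial_gs and early_gdt_descr_base now go through INIT_PER_CPU_VAR() instead of naming a per_cpu__* symbol directly, because (as the comment added in secondary_startup_64 says) the boot CPU runs off the init-data copy of the per-cpu section until the real per-cpu areas are set up. The sketch below is a rough userspace analogy of that arrangement; the C objects are made up for illustration (in the kernel the indirection is presumably resolved to separate init-area symbols by the linker script), and only the idea, taking the address of the variable's slot inside the load-time template, comes from the patch itself:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for the per-cpu section as laid out in the kernel image. */
    struct pcpu_template {
        char gdt_page[4096];            /* illustrative sizes */
        char irq_stack_union[16384];
    };

    static struct pcpu_template init_copy;     /* load-time template (init data) */
    static struct pcpu_template cpu_area[2];   /* real per-cpu areas, set up later */

    /* Analogue of INIT_PER_CPU_VAR(irq_stack_union): an address inside the
     * template, usable before cpu_area[] exists. */
    static void *initial_gs = &init_copy.irq_stack_union;

    int main(void)
    {
        printf("early %%gs base (template): %p\n", initial_gs);

        /* Later, each CPU gets its own copy and switches its %gs base over. */
        memcpy(&cpu_area[0], &init_copy, sizeof init_copy);
        printf("cpu0 %%gs base:             %p\n", (void *)&cpu_area[0].irq_stack_union);
        return 0;
    }
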