x86: Remove move_cleanup_count from irq_cfg
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 32f5a11..780cd92 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
 #include <linux/linkage.h>
 #include <linux/threads.h>
 #include <linux/init.h>
-#include <asm/desc.h>
 #include <asm/segment.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/msr.h>
 #include <asm/cache.h>
 #include <asm/processor-flags.h>
+#include <asm/percpu.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
  *
  */
 
+#define pud_index(x)   (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+
+L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
+L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
+L4_START_KERNEL = pgd_index(__START_KERNEL_map)
+L3_START_KERNEL = pud_index(__START_KERNEL_map)
+
        .text
-       .section .text.head
+       __HEAD
        .code64
        .globl startup_64
 startup_64:
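
The L4_*/L3_* symbols introduced above replace hand-counted page-table
indices (258, 511, 510) that appear further down in this file. A minimal
C sketch of the same arithmetic, assuming the layout constants of this
kernel generation (__PAGE_OFFSET had just moved from 0xffff810000000000,
index 258, to 0xffff880000000000, index 272), shows why the magic
numbers had to become symbolic:

#include <assert.h>

#define PGDIR_SHIFT     39      /* span of one level-4 (pgd) entry: 512 GB */
#define PUD_SHIFT       30      /* span of one level-3 (pud) entry: 1 GB */
#define PTRS_PER_PGD    512
#define PTRS_PER_PUD    512

/* Assumed layout constants of this kernel generation, not part of the diff */
#define PAGE_OFFSET     0xffff880000000000ULL
#define START_KERNEL    0xffffffff80000000ULL

#define pgd_index(x)    (((x) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pud_index(x)    (((x) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

int main(void)
{
        assert(pgd_index(PAGE_OFFSET) == 272);  /* L4_PAGE_OFFSET */
        assert(pgd_index(START_KERNEL) == 511); /* L4_START_KERNEL */
        assert(pud_index(START_KERNEL) == 510); /* L3_START_KERNEL */
        return 0;
}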
@@ -77,8 +84,8 @@ startup_64:
        /* Fixup the physical addresses in the page table
         */
        addq    %rbp, init_level4_pgt + 0(%rip)
-       addq    %rbp, init_level4_pgt + (258*8)(%rip)
-       addq    %rbp, init_level4_pgt + (511*8)(%rip)
+       addq    %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
+       addq    %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
 
        addq    %rbp, level3_ident_pgt + 0(%rip)
 
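For context, the fixup above adds %rbp, the delta between where the
kernel was actually loaded and where it was linked, to every level-4
slot that stores a physical address. A rough C rendering, with
illustrative names that are not taken from the kernel:

#include <stdint.h>

/* phys_delta plays the role of %rbp; the slot indices are the L4_*
 * values computed at the top of the file. */
static void fixup_phys_addrs(uint64_t pgt[512], uint64_t phys_delta,
                             unsigned l4_page_offset, unsigned l4_start_kernel)
{
        pgt[0]               += phys_delta;     /* identity-map entry */
        pgt[l4_page_offset]  += phys_delta;     /* direct-map entry */
        pgt[l4_start_kernel] += phys_delta;     /* kernel-text entry */
}
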
@@ -103,7 +110,7 @@ startup_64:
        movq    %rdi, %rax
        shrq    $PMD_SHIFT, %rax
        andq    $(PTRS_PER_PMD - 1), %rax
-       leaq    __PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx
+       leaq    __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
        leaq    level2_spare_pgt(%rip), %rbx
        movq    %rdx, 0(%rbx, %rax, 8)
 ident_complete:
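
The shrq/andq pair selects the PMD slot covering the address in %rdi,
and the leaq builds the entry by adding the flag bits to the (already
2 MB-aligned) physical address; the renamed
__PAGE_KERNEL_IDENT_LARGE_EXEC keeps the early identity-map flags
distinct from the kernel-text flags, which a later hunk strips of
_PAGE_GLOBAL. Roughly, in C, with PMD_SHIFT and PTRS_PER_PMD assumed to
be 21 and 512:

#include <stdint.h>

#define PMD_SHIFT       21      /* one PMD entry maps 2 MB */
#define PTRS_PER_PMD    512

/* pmd_flags stands in for __PAGE_KERNEL_IDENT_LARGE_EXEC; phys is
 * assumed 2 MB-aligned, as the surrounding assembly guarantees. */
static void ident_map_2mb(uint64_t spare_pgt[PTRS_PER_PMD],
                          uint64_t phys, uint64_t pmd_flags)
{
        uint64_t idx = (phys >> PMD_SHIFT) & (PTRS_PER_PMD - 1); /* shrq/andq */

        spare_pgt[idx] = phys + pmd_flags;      /* leaq + movq */
}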
@@ -219,12 +226,15 @@ ENTRY(secondary_startup_64)
        movl %eax,%fs
        movl %eax,%gs
 
-       /* 
-        * Setup up a dummy PDA. this is just for some early bootup code
-        * that does in_interrupt() 
-        */ 
+       /* Set up %gs.
+        *
+        * The base of %gs always points to the bottom of the irqstack
+        * union.  If the stack protector canary is enabled, it is
+        * located at %gs:40.  Note that, on SMP, the boot cpu uses
+        * init data section till per cpu areas are set up.
+        */
        movl    $MSR_GS_BASE,%ecx
-       movq    $empty_zero_page,%rax
+       movq    initial_gs(%rip),%rax
        movq    %rax,%rdx
        shrq    $32,%rdx
        wrmsr   
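
The movq/shrq pair above implements the wrmsr calling convention: the
64-bit %gs base is passed split across %edx:%eax, with the MSR number in
%ecx. A minimal self-contained sketch (MSR_GS_BASE is the architectural
IA32_GS_BASE MSR, 0xc0000101):

#include <stdint.h>

#define MSR_GS_BASE     0xc0000101u     /* architectural IA32_GS_BASE */

static inline void wrmsr64(uint32_t msr, uint64_t val)
{
        uint32_t lo = (uint32_t)val;            /* low half stays in %eax */
        uint32_t hi = (uint32_t)(val >> 32);    /* shrq $32,%rdx */

        asm volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi));
}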
@@ -250,6 +260,8 @@ ENTRY(secondary_startup_64)
        .align  8
        ENTRY(initial_code)
        .quad   x86_64_start_kernel
+       ENTRY(initial_gs)
+       .quad   INIT_PER_CPU_VAR(irq_stack_union)
        __FINITDATA
 
        ENTRY(stack_start)
@@ -298,7 +310,7 @@ ENTRY(early_idt_handler)
        call dump_stack
 #ifdef CONFIG_KALLSYMS 
        leaq early_idt_ripmsg(%rip),%rdi
-       movq 8(%rsp),%rsi       # get rip again
+       movq 0(%rsp),%rsi       # get rip again
        call __print_symbol
 #endif
 #endif /* EARLY_PRINTK */
@@ -316,8 +328,6 @@ early_idt_ripmsg:
 #endif /* CONFIG_EARLY_PRINTK */
        .previous
 
-.balign PAGE_SIZE
-
 #define NEXT_PAGE(name) \
        .balign PAGE_SIZE; \
 ENTRY(name)
@@ -338,9 +348,9 @@ ENTRY(name)
         */
 NEXT_PAGE(init_level4_pgt)
        .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-       .fill   257,8,0
+       .org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
        .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-       .fill   252,8,0
+       .org    init_level4_pgt + L4_START_KERNEL*8, 0
        /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
        .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
 
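The switch from .fill to .org trades hand-counted padding for explicit
byte offsets: .org zero-pads up to the named position, so the
PAGE_OFFSET and START_KERNEL entries land in the right slots even when
the indices move, whereas the old counts of 257 and 252 were only
correct for indices 258 and 511. A quick C check of both schemes, with
the new index value assumed to be 272:

#include <assert.h>

int main(void)
{
        /* old scheme: slot counts had to add up by hand */
        assert(1 + 257 == 258);         /* ident quad + .fill = PAGE_OFFSET slot */
        assert(258 + 1 + 252 == 511);   /* + quad + .fill = START_KERNEL slot */

        /* new scheme: .org names the byte offset of each slot directly */
        assert(272 * 8 == 2176);        /* L4_PAGE_OFFSET now that index is 272 */
        assert(511 * 8 == 4088);        /* L4_START_KERNEL */
        return 0;
}
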
@@ -349,7 +359,7 @@ NEXT_PAGE(level3_ident_pgt)
        .fill   511,8,0
 
 NEXT_PAGE(level3_kernel_pgt)
-       .fill   510,8,0
+       .fill   L3_START_KERNEL,8,0
        /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
        .quad   level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
        .quad   level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
@@ -367,7 +377,7 @@ NEXT_PAGE(level2_ident_pgt)
        /* Since I easily can, map the first 1G.
         * Don't set NX because code runs from these pages.
         */
-       PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)
+       PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
 
 NEXT_PAGE(level2_kernel_pgt)
        /*
@@ -380,7 +390,7 @@ NEXT_PAGE(level2_kernel_pgt)
         *  If you want to increase this then increase MODULES_VADDR
         *  too.)
         */
-       PMDS(0, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL,
+       PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
                KERNEL_IMAGE_SIZE/PMD_SIZE)
 
 NEXT_PAGE(level2_spare_pgt)
@@ -394,19 +404,21 @@ NEXT_PAGE(level2_spare_pgt)
        .globl early_gdt_descr
 early_gdt_descr:
        .word   GDT_ENTRIES*8-1
-       .quad   per_cpu__gdt_page
+early_gdt_descr_base:
+       .quad   INIT_PER_CPU_VAR(gdt_page)
 
 ENTRY(phys_base)
        /* This must match the first entry in level2_kernel_pgt */
        .quad   0x0000000000000000
 
+#include "../../x86/xen/xen-head.S"
        
        .section .bss, "aw", @nobits
        .align L1_CACHE_BYTES
 ENTRY(idt_table)
-       .skip 256 * 16
+       .skip IDT_ENTRIES * 16
 
-       .section .bss.page_aligned, "aw", @nobits
+       __PAGE_ALIGNED_BSS
        .align PAGE_SIZE
 ENTRY(empty_zero_page)
        .skip PAGE_SIZE
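
Replacing the magic 256 with IDT_ENTRIES makes the arithmetic explicit:
on x86-64 each IDT gate descriptor occupies 16 bytes, so the table is
256 * 16 = 4096 bytes, exactly one page. A small sketch of the gate
layout behind that factor of 16 (field names are illustrative, not from
this diff):

#include <assert.h>
#include <stdint.h>

/* One 64-bit IDT gate descriptor; the 16-byte size is architectural. */
struct idt_gate {
        uint16_t offset_low;    /* handler bits 0..15 */
        uint16_t selector;      /* kernel code segment */
        uint8_t  ist;           /* interrupt stack table index */
        uint8_t  type_attr;     /* gate type, DPL, present */
        uint16_t offset_mid;    /* handler bits 16..31 */
        uint32_t offset_high;   /* handler bits 32..63 */
        uint32_t reserved;
} __attribute__((packed));

int main(void)
{
        assert(sizeof(struct idt_gate) == 16);
        assert(256 * sizeof(struct idt_gate) == 4096);  /* IDT_ENTRIES * 16 */
        return 0;
}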