X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Fasm-generic%2Fvmlinux.lds.h;h=5615440027ec545b069f6477e61d85273b4a674a;hb=dbcb0f19c877df9026b8c1227758d38bd561e9c4;hp=9d873163a7ab09dfe605e2a2a49c5176f1b347e9;hpb=735a7ffb739b6efeaeb1e720306ba308eaaeb20e;p=safe%2Fjmp%2Flinux-2.6

diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 9d87316..5615440 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -9,10 +9,15 @@
 /* Align . to a 8 byte boundary equals to maximum function alignment. */
 #define ALIGN_FUNCTION() . = ALIGN(8)
 
-#define RODATA								\
-	. = ALIGN(4096);						\
-	__start_rodata = .;						\
+/* .data section */
+#define DATA_DATA							\
+	*(.data)							\
+	*(.data.init.refok)
+
+#define RO_DATA(align)							\
+	. = ALIGN((align));						\
 	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) {			\
+		VMLINUX_SYMBOL(__start_rodata) = .;			\
 		*(.rodata) *(.rodata.*)					\
 		*(__vermagic)		/* Kernel version magic */	\
 	}								\
@@ -35,6 +40,9 @@
 		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
 		*(.pci_fixup_enable)					\
 		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
+		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
+		*(.pci_fixup_resume)					\
+		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
 	}								\
 									\
 	/* RapidIO route ops */						\
@@ -124,13 +132,14 @@
 		VMLINUX_SYMBOL(__start___param) = .;			\
 		*(__param)						\
 		VMLINUX_SYMBOL(__stop___param) = .;			\
+		VMLINUX_SYMBOL(__end_rodata) = .;			\
 	}								\
 									\
-	/* Unwind data binary search table */				\
-	EH_FRAME_HDR							\
-									\
-	__end_rodata = .;						\
-	. = ALIGN(4096);
+	. = ALIGN((align));
+
+/* RODATA provided for backward compatibility.
+ * All archs are supposed to use RO_DATA() */
+#define RODATA RO_DATA(4096)
 
 #define SECURITY_INIT							\
 	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
@@ -139,6 +148,14 @@
 		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
 	}
 
+/* .text section. Map to function alignment to avoid address changes
+ * during second ld run in second ld pass when generating System.map */
+#define TEXT_TEXT							\
+	ALIGN_FUNCTION();						\
+	*(.text)							\
+	*(.text.init.refok)						\
+	*(.exit.text.refok)
+
 /* sched.text is aling to function alignment to secure we have same
  * address even at second ld pass when generating System.map */
 #define SCHED_TEXT							\
@@ -161,18 +178,6 @@
 		*(.kprobes.text)					\
 		VMLINUX_SYMBOL(__kprobes_text_end) = .;
 
-#ifdef CONFIG_STACK_UNWIND
-	/* Unwind data binary search table */
-#define EH_FRAME_HDR							\
-	.eh_frame_hdr : AT(ADDR(.eh_frame_hdr) - LOAD_OFFSET) {	\
-		VMLINUX_SYMBOL(__start_unwind_hdr) = .;			\
-		*(.eh_frame_hdr)					\
-		VMLINUX_SYMBOL(__end_unwind_hdr) = .;			\
-	}
-#else
-#define EH_FRAME_HDR
-#endif
-
 		/* DWARF debug sections.
 		Symbols in the DWARF debugging sections are relative to
 		the beginning of the section so we begin them at 0.  */
@@ -211,10 +216,24 @@
 		.stab.indexstr 0 : { *(.stab.indexstr) }		\
 		.comment 0 : { *(.comment) }
 
+#define BUG_TABLE							\
+	. = ALIGN(8);							\
+	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
+		__start___bug_table = .;				\
+		*(__bug_table)						\
+		__stop___bug_table = .;					\
+	}
+
 #define NOTES								\
-	.notes : { *(.note.*) } :note
+	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
+		VMLINUX_SYMBOL(__start_notes) = .;			\
+		*(.note.*)						\
+		VMLINUX_SYMBOL(__stop_notes) = .;			\
+	}
 
 #define INITCALLS							\
+	*(.initcall0.init)						\
+	*(.initcall0s.init)						\
 	*(.initcall1.init)						\
 	*(.initcall1s.init)						\
 	*(.initcall2.init)						\
@@ -225,8 +244,17 @@
 	*(.initcall4s.init)						\
 	*(.initcall5.init)						\
 	*(.initcall5s.init)						\
+	*(.initcallrootfs.init)						\
 	*(.initcall6.init)						\
 	*(.initcall6s.init)						\
 	*(.initcall7.init)						\
 	*(.initcall7s.init)
 
+#define PERCPU(align)							\
+	. = ALIGN(align);						\
+	__per_cpu_start = .;						\
+	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
+		*(.data.percpu)						\
+		*(.data.percpu.shared_aligned)				\
+	}								\
+	__per_cpu_end = .;
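For context only, not part of the patch above: a minimal sketch of how an architecture's vmlinux.lds.S might consume the macros this change introduces (TEXT_TEXT, DATA_DATA, RO_DATA, BUG_TABLE, NOTES, PERCPU). The section layout, the 4096-byte alignment values, and the assumption that LOAD_OFFSET falls back to the header's default are illustrative; real per-arch linker scripts differ.

/* Illustrative arch linker-script fragment -- assumptions, not taken
 * from this patch.  LOAD_OFFSET is left to the generic header's
 * default; addresses and ordering are simplified. */
#include <asm-generic/vmlinux.lds.h>

SECTIONS
{
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		TEXT_TEXT	/* .text, .text.init.refok, .exit.text.refok */
		SCHED_TEXT
	}

	RO_DATA(4096)		/* new form; unconverted scripts keep RODATA,
				 * which is now just RO_DATA(4096) */

	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA	/* .data plus .data.init.refok */
	}

	BUG_TABLE		/* __bug_table bracketed by __start/__stop symbols */
	NOTES			/* .note.* bracketed by __start_notes/__stop_notes */
	PERCPU(4096)		/* per-CPU data, page aligned */
}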