diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 0240e05..89853bc 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -1,3 +1,5 @@
+#include <linux/section-names.h>
+
 #ifndef LOAD_OFFSET
 #define LOAD_OFFSET 0
 #endif
@@ -9,10 +11,110 @@
 /* Align . to an 8 byte boundary; this equals the maximum function alignment. */
 #define ALIGN_FUNCTION() . = ALIGN(8)
 
+/* The actual configuration determines whether the init/exit sections
+ * are handled as text/data or whether they can be discarded (which
+ * often happens at runtime).
+ */
+#ifdef CONFIG_HOTPLUG
+#define DEV_KEEP(sec) *(.dev##sec)
+#define DEV_DISCARD(sec)
+#else
+#define DEV_KEEP(sec)
+#define DEV_DISCARD(sec) *(.dev##sec)
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define CPU_KEEP(sec) *(.cpu##sec)
+#define CPU_DISCARD(sec)
+#else
+#define CPU_KEEP(sec)
+#define CPU_DISCARD(sec) *(.cpu##sec)
+#endif
+
+#if defined(CONFIG_MEMORY_HOTPLUG)
+#define MEM_KEEP(sec) *(.mem##sec)
+#define MEM_DISCARD(sec)
+#else
+#define MEM_KEEP(sec)
+#define MEM_DISCARD(sec) *(.mem##sec)
+#endif
+
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#define MCOUNT_REC()    VMLINUX_SYMBOL(__start_mcount_loc) = .; \
+                        *(__mcount_loc) \
+                        VMLINUX_SYMBOL(__stop_mcount_loc) = .;
+#else
+#define MCOUNT_REC()
+#endif
+
+#ifdef CONFIG_TRACE_BRANCH_PROFILING
+#define LIKELY_PROFILE()        VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
+                                *(_ftrace_annotated_branch) \
+                                VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
+#else
+#define LIKELY_PROFILE()
+#endif
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+#define BRANCH_PROFILE()        VMLINUX_SYMBOL(__start_branch_profile) = .; \
+                                *(_ftrace_branch) \
+                                VMLINUX_SYMBOL(__stop_branch_profile) = .;
+#else
+#define BRANCH_PROFILE()
+#endif
+
+#ifdef CONFIG_EVENT_TRACER
+#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
+                        *(_ftrace_events) \
+                        VMLINUX_SYMBOL(__stop_ftrace_events) = .;
+#else
+#define FTRACE_EVENTS()
+#endif
+
+#ifdef CONFIG_TRACING
+#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
+                        *(__trace_printk_fmt) /* trace_printk fmt pointers */ \
+                        VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
+#else
+#define TRACE_PRINTKS()
+#endif
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
+                         *(__syscalls_metadata) \
+                         VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
+#else
+#define TRACE_SYSCALLS()
+#endif
+
 /* .data section */
 #define DATA_DATA \
         *(.data) \
-        *(.data.init.refok)
+        *(.ref.data) \
+        DEV_KEEP(init.data) \
+        DEV_KEEP(exit.data) \
+        CPU_KEEP(init.data) \
+        CPU_KEEP(exit.data) \
+        MEM_KEEP(init.data) \
+        MEM_KEEP(exit.data) \
+        . = ALIGN(8); \
+        VMLINUX_SYMBOL(__start___markers) = .; \
+        *(__markers) \
+        VMLINUX_SYMBOL(__stop___markers) = .; \
+        . = ALIGN(32); \
+        VMLINUX_SYMBOL(__start___tracepoints) = .; \
+        *(__tracepoints) \
+        VMLINUX_SYMBOL(__stop___tracepoints) = .; \
+        /* implement dynamic printk debug */ \
+        . = ALIGN(8); \
+        VMLINUX_SYMBOL(__start___verbose) = .; \
+        *(__verbose) \
+        VMLINUX_SYMBOL(__stop___verbose) = .; \
+        LIKELY_PROFILE() \
+        BRANCH_PROFILE() \
+        TRACE_PRINTKS() \
+        FTRACE_EVENTS() \
+        TRACE_SYSCALLS()
 
 #define RO_DATA(align) \
         . = ALIGN((align)); \
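How objects land in the input sections that DATA_DATA routes: kernel code tags variables and functions with section attributes (__refdata, __devinitdata, and friends), and the script above then keeps or discards the resulting .ref.*/.devinit.* input sections per configuration. A minimal, standalone C sketch of that placement mechanism; the variable names here are hypothetical, only the section names mirror the ones handled above:

/* sketch of section-attribute placement; builds with gcc on ELF targets */
#include <stdio.h>

/* like the kernel's __refdata: data referenced from init code but kept,
 * routed into .data by the *(.ref.data) pattern above */
static int keep_me __attribute__((section(".ref.data"))) = 1;

/* like __devinitdata: lands in .devinit.data, which DEV_KEEP() retains
 * under CONFIG_HOTPLUG and DEV_DISCARD() drops otherwise */
static int maybe_discarded __attribute__((section(".devinit.data"))) = 2;

int main(void)
{
        printf("%d %d\n", keep_me, maybe_discarded);
        return 0;
}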
@@ -20,12 +122,16 @@
                 VMLINUX_SYMBOL(__start_rodata) = .; \
                 *(.rodata) *(.rodata.*) \
                 *(__vermagic) /* Kernel version magic */ \
+                *(__markers_strings) /* Markers: strings */ \
+                *(__tracepoints_strings) /* Tracepoints: strings */ \
         } \
 \
         .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
                 *(.rodata1) \
         } \
 \
+        BUG_TABLE \
+\
         /* PCI quirks */ \
         .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
                 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
@@ -43,6 +149,19 @@
                 VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
                 *(.pci_fixup_resume) \
                 VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
+                VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
+                *(.pci_fixup_resume_early) \
+                VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
+                VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
+                *(.pci_fixup_suspend) \
+                VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
+        } \
+\
+        /* Built-in firmware blobs */ \
+        .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
+                VMLINUX_SYMBOL(__start_builtin_fw) = .; \
+                *(.builtin_fw) \
+                VMLINUX_SYMBOL(__end_builtin_fw) = .; \
         } \
 \
         /* RapidIO route ops */ \
         .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \
@@ -52,6 +171,8 @@
                 VMLINUX_SYMBOL(__end_rio_route_ops) = .; \
         } \
 \
+        TRACEDATA \
+\
         /* Kernel symbol table: Normal symbols */ \
         __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
                 VMLINUX_SYMBOL(__start___ksymtab) = .; \
@@ -127,14 +248,26 @@
                 *(__ksymtab_strings) \
         } \
 \
+        /* __*init sections */ \
+        __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
+                *(.ref.rodata) \
+                MCOUNT_REC() \
+                DEV_KEEP(init.rodata) \
+                DEV_KEEP(exit.rodata) \
+                CPU_KEEP(init.rodata) \
+                CPU_KEEP(exit.rodata) \
+                MEM_KEEP(init.rodata) \
+                MEM_KEEP(exit.rodata) \
+        } \
+\
         /* Built-in module parameters. */ \
         __param : AT(ADDR(__param) - LOAD_OFFSET) { \
                 VMLINUX_SYMBOL(__start___param) = .; \
                 *(__param) \
                 VMLINUX_SYMBOL(__stop___param) = .; \
+                . = ALIGN((align)); \
                 VMLINUX_SYMBOL(__end_rodata) = .; \
         } \
- \
         . = ALIGN((align));
 
 /* RODATA provided for backward compatibility.
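All of the __start_*/__end_* pairs defined above (PCI fixups, built-in firmware, ksymtab, __param) follow one pattern on the C side: the linker script delimits an array of records, and C code walks it between the two symbols. A hedged, standalone sketch of the lookup for the new .builtin_fw table; the struct layout mirrors linux/firmware.h of this era, and note that plain user-space GNU ld auto-generates __start_/__stop_ symbols for identifier-named sections, whereas the script above defines __start_builtin_fw/__end_builtin_fw explicitly:

#include <stdio.h>
#include <string.h>

struct builtin_fw {
        const char *name;
        const void *data;
        unsigned long size;
};

/* emit one record into a "builtin_fw" section so the example links and runs */
static const char blob[] = "\x12\x34";
static const struct builtin_fw fw_entry
        __attribute__((section("builtin_fw"), used)) =
        { "example/fw.bin", blob, sizeof(blob) };

/* auto-provided by GNU ld for C-identifier section names */
extern const struct builtin_fw __start_builtin_fw[];
extern const struct builtin_fw __stop_builtin_fw[];

static const struct builtin_fw *find_fw(const char *name)
{
        const struct builtin_fw *fw;

        for (fw = __start_builtin_fw; fw != __stop_builtin_fw; fw++)
                if (strcmp(fw->name, name) == 0)
                        return fw;
        return NULL;    /* the kernel would fall back to a user-space load */
}

int main(void)
{
        const struct builtin_fw *fw = find_fw("example/fw.bin");

        printf("found: %s (%lu bytes)\n", fw ? fw->name : "none",
               fw ? fw->size : 0UL);
        return 0;
}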
@@ -152,8 +285,17 @@
  * during the second ld pass when generating System.map */
 #define TEXT_TEXT \
         ALIGN_FUNCTION(); \
+        *(.text.hot) \
         *(.text) \
-        *(.text.init.refok)
+        *(.ref.text) \
+        DEV_KEEP(init.text) \
+        DEV_KEEP(exit.text) \
+        CPU_KEEP(init.text) \
+        CPU_KEEP(exit.text) \
+        MEM_KEEP(init.text) \
+        MEM_KEEP(exit.text) \
+        *(.text.unlikely)
+
 
 /* sched.text is aligned to the function alignment to ensure we have the
  * same address even at the second ld pass when generating System.map */
@@ -177,6 +319,50 @@
         *(.kprobes.text) \
         VMLINUX_SYMBOL(__kprobes_text_end) = .;
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define IRQENTRY_TEXT \
+        ALIGN_FUNCTION(); \
+        VMLINUX_SYMBOL(__irqentry_text_start) = .; \
+        *(.irqentry.text) \
+        VMLINUX_SYMBOL(__irqentry_text_end) = .;
+#else
+#define IRQENTRY_TEXT
+#endif
+
+/* Section used for early init (in .S files) */
+#define HEAD_TEXT *(HEAD_TEXT_SECTION)
+
+/* init and exit section handling */
+#define INIT_DATA \
+        *(.init.data) \
+        DEV_DISCARD(init.data) \
+        DEV_DISCARD(init.rodata) \
+        CPU_DISCARD(init.data) \
+        CPU_DISCARD(init.rodata) \
+        MEM_DISCARD(init.data) \
+        MEM_DISCARD(init.rodata)
+
+#define INIT_TEXT \
+        *(.init.text) \
+        DEV_DISCARD(init.text) \
+        CPU_DISCARD(init.text) \
+        MEM_DISCARD(init.text)
+
+#define EXIT_DATA \
+        *(.exit.data) \
+        DEV_DISCARD(exit.data) \
+        DEV_DISCARD(exit.rodata) \
+        CPU_DISCARD(exit.data) \
+        CPU_DISCARD(exit.rodata) \
+        MEM_DISCARD(exit.data) \
+        MEM_DISCARD(exit.rodata)
+
+#define EXIT_TEXT \
+        *(.exit.text) \
+        DEV_DISCARD(exit.text) \
+        CPU_DISCARD(exit.text) \
+        MEM_DISCARD(exit.text)
+
 /* DWARF debug sections.
    Symbols in the DWARF debugging sections are relative to
    the beginning of the section so we begin them at 0. */
@@ -215,13 +401,29 @@
 .stab.indexstr 0 : { *(.stab.indexstr) } \
 .comment 0 : { *(.comment) }
 
+#ifdef CONFIG_GENERIC_BUG
 #define BUG_TABLE \
         . = ALIGN(8); \
         __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
-                __start___bug_table = .; \
+                VMLINUX_SYMBOL(__start___bug_table) = .; \
                 *(__bug_table) \
-                __stop___bug_table = .; \
+                VMLINUX_SYMBOL(__stop___bug_table) = .; \
+        }
+#else
+#define BUG_TABLE
+#endif
+
+#ifdef CONFIG_PM_TRACE
+#define TRACEDATA \
+        . = ALIGN(4); \
+        .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
+                VMLINUX_SYMBOL(__tracedata_start) = .; \
+                *(.tracedata) \
+                VMLINUX_SYMBOL(__tracedata_end) = .; \
         }
+#else
+#define TRACEDATA
+#endif
 
 #define NOTES \
         .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
@@ -231,6 +433,8 @@
         }
 
 #define INITCALLS \
+        *(.initcallearly.init) \
+        VMLINUX_SYMBOL(__early_initcall_end) = .; \
         *(.initcall0.init) \
         *(.initcall0s.init) \
         *(.initcall1.init) \
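The levels between these two hunks (.initcall1s.init through .initcall6s.init) are unchanged and elided by the diff. The ordering guarantee here is purely link order: everything placed before __early_initcall_end runs via the new early-initcall pass, and level N entries run before level N+1. A standalone sketch of the underlying trick, with hypothetical my_* names standing in for the kernel's initcall machinery and one level instead of eight:

#include <stdio.h>

typedef int (*initcall_t)(void);

/* registration: drop a function pointer into a named section; the
 * linker concatenates every such record, in link order, into a table */
#define my_initcall(fn) \
        static initcall_t __initcall_##fn \
        __attribute__((section("my_initcalls"), used)) = fn

static int init_a(void) { puts("init_a"); return 0; }
static int init_b(void) { puts("init_b"); return 0; }

my_initcall(init_a);
my_initcall(init_b);

/* GNU ld auto-defines these for identifier-named sections; the kernel
 * script instead brackets its table with explicit symbols */
extern initcall_t __start_my_initcalls[];
extern initcall_t __stop_my_initcalls[];

int main(void)
{
        initcall_t *call;

        for (call = __start_my_initcalls; call < __stop_my_initcalls; call++)
                (*call)();
        return 0;
}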
@@ -249,11 +453,59 @@
         *(.initcall7.init) \
         *(.initcall7s.init)
 
+/**
+ * PERCPU_VADDR - define output section for percpu area
+ * @vaddr: explicit base address (optional)
+ * @phdr: destination PHDR (optional)
+ *
+ * Macro which expands to the output section for the percpu area. If @vaddr
+ * is not blank, it specifies an explicit base address, and all percpu
+ * symbols will be offset from the given address. If blank, @vaddr
+ * always equals @laddr + LOAD_OFFSET.
+ *
+ * @phdr defines the output PHDR to use if not blank. Be warned that
+ * the output PHDR is sticky: if @phdr is specified, the next output
+ * section in the linker script will go there too. @phdr should have
+ * a leading colon.
+ *
+ * Note that this macro defines __per_cpu_load as an absolute symbol.
+ * If there is no need to put the percpu section at a predetermined
+ * address, use PERCPU().
+ */
+#define PERCPU_VADDR(vaddr, phdr) \
+        VMLINUX_SYMBOL(__per_cpu_load) = .; \
+        .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
+                                - LOAD_OFFSET) { \
+                VMLINUX_SYMBOL(__per_cpu_start) = .; \
+                *(.data.percpu.first) \
+                *(.data.percpu.page_aligned) \
+                *(.data.percpu) \
+                *(.data.percpu.shared_aligned) \
+                VMLINUX_SYMBOL(__per_cpu_end) = .; \
+        } phdr \
+        . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
+
+/**
+ * PERCPU - define output section for percpu area, simple version
+ * @align: required alignment
+ *
+ * Aligns to @align and outputs the output section for the percpu area.
+ * This macro doesn't manipulate @vaddr or @phdr, so __per_cpu_load and
+ * __per_cpu_start will be identical.
+ *
+ * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
+ * that __per_cpu_load is defined as a relative symbol against
+ * .data.percpu, which is required for relocatable x86_32
+ * configurations.
+ */
 #define PERCPU(align) \
         . = ALIGN(align); \
-        __per_cpu_start = .; \
-        .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+        .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+                VMLINUX_SYMBOL(__per_cpu_load) = .; \
+                VMLINUX_SYMBOL(__per_cpu_start) = .; \
+                *(.data.percpu.first) \
+                *(.data.percpu.page_aligned) \
                 *(.data.percpu) \
                 *(.data.percpu.shared_aligned) \
-        } \
-        __per_cpu_end = .;
+                VMLINUX_SYMBOL(__per_cpu_end) = .; \
+        }
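What consumes these symbols: early boot code uses __per_cpu_load as the source image and __per_cpu_end - __per_cpu_start as its size, stamps out one copy of the section per CPU, and records each copy's distance from __per_cpu_start so that per_cpu() can add it to a symbol's link-time address. A standalone sketch of that arrangement; the section symbols are faked with a static array here, and the function names are illustrative rather than the kernel's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 4

/* stand-ins for the linker-provided symbols; with PERCPU() the load
 * address and the link address coincide, as they do here */
static char percpu_template[32] = "per-cpu initial data";
#define __per_cpu_load  percpu_template
#define __per_cpu_start percpu_template
#define __per_cpu_end   (percpu_template + sizeof(percpu_template))

static long __per_cpu_offset[NR_CPUS];

/* the kernel's per_cpu(var, cpu) boils down to: link address + cpu offset */
static void *per_cpu_ptr(void *link_addr, int cpu)
{
        return (char *)link_addr + __per_cpu_offset[cpu];
}

int main(void)
{
        long size = __per_cpu_end - __per_cpu_start;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                char *copy = malloc(size);      /* one area per CPU */

                memcpy(copy, __per_cpu_load, size);
                __per_cpu_offset[cpu] = copy - __per_cpu_start;
        }
        printf("cpu2 sees: %s\n", (char *)per_cpu_ptr(__per_cpu_start, 2));
        return 0;
}

With PERCPU_VADDR the two base symbols diverge: the initial image still sits at __per_cpu_load inside the kernel image, while the percpu symbols themselves are linked starting at @vaddr, which is what lets an arch place them at a fixed, segment-register-friendly base.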