/* ld script to make x86-64 Linux kernel
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
 */

#define LOAD_OFFSET __START_KERNEL_map

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/page_types.h>

#undef i386     /* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;

PHDRS {
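        /*
         * FLAGS() takes the ELF p_flags permission bits (PF_X = 1,
         * PF_W = 2, PF_R = 4), so FLAGS(5) marks a segment read+execute
         * and FLAGS(7) marks it read+write+execute, matching the
         * R_E/RWE notes below; FLAGS(0) on the PT_NOTE entry sets no
         * permission bits at all.
         */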
        text PT_LOAD FLAGS(5);          /* R_E */
        data PT_LOAD FLAGS(7);          /* RWE */
        user PT_LOAD FLAGS(7);          /* RWE */
        data.init PT_LOAD FLAGS(7);     /* RWE */
#ifdef CONFIG_SMP
        percpu PT_LOAD FLAGS(7);        /* RWE */
#endif
        data.init2 PT_LOAD FLAGS(7);    /* RWE */
        note PT_NOTE FLAGS(0);          /* ___ */
}

SECTIONS
{
        . = __START_KERNEL;
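        /*
         * The ELF entry point is used while the CPU is still running on
         * the load (physical) mapping: startup_64 is a virtual address
         * in the __START_KERNEL_map region, and subtracting LOAD_OFFSET
         * converts it back to its physical load address.
         */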
        phys_startup_64 = startup_64 - LOAD_OFFSET;

        /* Text and read-only data */
        .text : AT(ADDR(.text) - LOAD_OFFSET) {
                _text = .;
                /* First the code that has to be first for bootstrapping */
                *(.text.head)
                _stext = .;
                /* Then the rest */
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                IRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
                /* End of text section */
                _etext = .;
        } :text = 0x9090
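
        /*
         * The "= 0x9090" on the closing brace above is the section fill
         * pattern: any gap the linker leaves inside .text is padded
         * with 0x90 bytes, the x86 NOP opcode, so padding is harmlessly
         * executable.
         */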

        NOTES :text :note

        /* Exception table */
        . = ALIGN(16);
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
                __start___ex_table = .;
                *(__ex_table)
                __stop___ex_table = .;
        } :text = 0x9090
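
        /*
         * Each __ex_table entry pairs the address of an instruction
         * that is allowed to fault (a user-space access, for instance)
         * with the address of its fixup code; the fault handler
         * searches this table before concluding that a kernel-mode
         * fault is fatal.
         */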

        RODATA

        /* Align data segment to page size boundary */
        . = ALIGN(PAGE_SIZE);
        /* Data */
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
                DATA_DATA
                CONSTRUCTORS
                /* End of data section */
                _edata = .;
        } :data

        .data.cacheline_aligned :
                AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
                . = ALIGN(PAGE_SIZE);
                . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
                *(.data.cacheline_aligned)
        }

        . = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
        .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
                *(.data.read_mostly)
        }

#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \
                        SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \
                        SIZEOF(.data.read_mostly) + 4095) & ~(4095))
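
/*
 * (x + 4095) & ~(4095) rounds x up to the next 4 KiB boundary (0x1234
 * becomes 0x2000, for example), so both the load address and the
 * virtual address of the vsyscall area start on a fresh page right
 * after .data.read_mostly.
 */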

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
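
/*
 * The vsyscall sections are linked at VSYSCALL_ADDR, i.e. -10 MiB,
 * which in 64-bit two's complement is 0xffffffffff600000. They still
 * need file (load) addresses right after .data.read_mostly, so
 * VLOAD(x) translates a vsyscall link address back into its load
 * address, and VVIRT(x) gives the equivalent address in the normal
 * kernel mapping, used for symbols such as jiffies and
 * vsyscall_gtod_data below.
 */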

        . = VSYSCALL_ADDR;
        .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) {
                *(.vsyscall_0)
        } :user

        __vsyscall_0 = VSYSCALL_VIRT_ADDR;

        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
                *(.vsyscall_fn)
        }

        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
                *(.vsyscall_gtod_data)
        }
        vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
        .vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
                *(.vsyscall_clock)
        }
        vsyscall_clock = VVIRT(.vsyscall_clock);

        .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
                *(.vsyscall_1)
        }
        .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
                *(.vsyscall_2)
        }

        .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
                *(.vgetcpu_mode)
        }
        vgetcpu_mode = VVIRT(.vgetcpu_mode);

        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        .jiffies : AT(VLOAD(.jiffies)) {
                *(.jiffies)
        }
        jiffies = VVIRT(.jiffies);

        .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
                *(.vsyscall_3)
        }

        . = VSYSCALL_VIRT_ADDR + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT
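
/*
 * Layout note: the vsyscall area is a single page mapped at the fixed
 * address 0xffffffffff600000 in every user process. Each vsyscall
 * occupies its own 1024-byte slot, which is why .vsyscall_1 through
 * .vsyscall_3 are pinned at ADDR(.vsyscall_0) + 1024/2048/3072 rather
 * than packed together. The final assignment to "." above steps past
 * the page's kernel-mapping alias so the following sections resume the
 * normal layout.
 */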

        .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
                . = ALIGN(THREAD_SIZE);
                *(.data.init_task)
        } :data.init

        .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
                . = ALIGN(PAGE_SIZE);
                *(.data.page_aligned)
        }
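
        /*
         * .smp_locks below records the address of every "lock" prefix
         * in the kernel so the alternatives code can patch the prefixes
         * out when running on a single CPU; once that has happened the
         * table is no longer needed, hence "might get freed after init".
         */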
        .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
                /* might get freed after init */
                . = ALIGN(PAGE_SIZE);
                __smp_locks = .;
                *(.smp_locks)
                __smp_locks_end = .;
                . = ALIGN(PAGE_SIZE);
        }

        /* Init code and data */
        . = ALIGN(PAGE_SIZE);
        __init_begin = .;       /* paired with __init_end */
        .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
                _sinittext = .;
                INIT_TEXT
                _einittext = .;
        }

        .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
                __initdata_begin = .;
                INIT_DATA
                __initdata_end = .;
        }

        .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
                __setup_start = .;
                *(.init.setup)
                __setup_end = .;
        }

        .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
                __initcall_start = .;
                INITCALLS
                __initcall_end = .;
        }
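
        /*
         * INITCALLS (from asm-generic/vmlinux.lds.h) expands to the
         * .initcallN.init input sections in level order, which is what
         * gives initcalls their boot-time ordering; the init code walks
         * the resulting array between __initcall_start and
         * __initcall_end.
         */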

        .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
                __con_initcall_start = .;
                *(.con_initcall.init)
                __con_initcall_end = .;
        }

        .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
                __x86_cpu_dev_start = .;
                *(.x86_cpu_dev.init)
                __x86_cpu_dev_end = .;
        }

        SECURITY_INIT

        . = ALIGN(8);
        .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
                __parainstructions = .;
                *(.parainstructions)
                __parainstructions_end = .;
        }

        .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
                __alt_instructions = .;
                *(.altinstructions)
                __alt_instructions_end = .;
        }

        .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
                *(.altinstr_replacement)
        }

        /*
         * .exit.text is discarded at runtime, not link time, to deal with
         * references from .altinstructions and .eh_frame
         */
        .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
                EXIT_TEXT
        }
        .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
                EXIT_DATA
        }

#ifdef CONFIG_BLK_DEV_INITRD
        . = ALIGN(PAGE_SIZE);
        .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
                __initramfs_start = .;
                *(.init.ramfs)
                __initramfs_end = .;
        }
#endif

#ifdef CONFIG_SMP
        /*
         * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes
         * the output PHDR, so the next output section - .data_nosave -
         * should start another segment, data.init2.  Also, pda should
         * be at the head of the percpu area.  Preallocate it and define
         * the percpu offset symbol so that it can be accessed as a
         * percpu variable.
         */
        . = ALIGN(PAGE_SIZE);
        PERCPU_VADDR(0, :percpu)
#else
        PERCPU(PAGE_SIZE)
#endif
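
        /*
         * With a zero-based percpu section each per-cpu symbol's
         * link-time value is simply its offset within the per-cpu area,
         * so generated code can address it directly as %gs:symbol;
         * PERCPU_VADDR(0, :percpu) links the section at virtual address
         * 0 while still giving it a normal load address.
         */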

        . = ALIGN(PAGE_SIZE);
        __init_end = .;

        .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
                . = ALIGN(PAGE_SIZE);
                __nosave_begin = .;
                *(.data.nosave)
                . = ALIGN(PAGE_SIZE);
                __nosave_end = .;
        } :data.init2   /* use another segment, data.init2; see PERCPU_VADDR() above */

        .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
                . = ALIGN(PAGE_SIZE);
                __bss_start = .;        /* BSS */
                *(.bss.page_aligned)
                *(.bss)
                __bss_stop = .;
        }

        .brk : AT(ADDR(.brk) - LOAD_OFFSET) {
                . = ALIGN(PAGE_SIZE);
                __brk_base = .;
                . += 64 * 1024;         /* 64k alignment slop space */
                *(.brk_reservation)     /* areas brk users have reserved */
                __brk_limit = .;
        }

        _end = .;
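
        /*
         * The .brk arena lets early boot code grab memory via
         * extend_brk() before the normal allocators exist; callers
         * declare their worst-case needs with RESERVE_BRK(), which
         * emits into .brk_reservation, and the unused tail of the arena
         * is reclaimed once boot is done.
         */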

        /* Sections to be discarded */
        /DISCARD/ : {
                *(.exitcall.exit)
                *(.eh_frame)
        }

        STABS_DEBUG
        DWARF_DEBUG
}

/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
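
/*
 * INIT_PER_CPU(gdt_page), for example, expands to
 *
 *      init_per_cpu__gdt_page = per_cpu__gdt_page + __per_cpu_load;
 *
 * turning the zero-based per-cpu offset back into an address inside the
 * initial per-cpu data shipped with the image, which the boot CPU can
 * use before the real per-cpu areas are set up.
 */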

/*
 * Build-time check on the image size:
 */
ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
        "kernel image bigger than KERNEL_IMAGE_SIZE")

#ifdef CONFIG_SMP
ASSERT((per_cpu__irq_stack_union == 0),
        "irq_stack_union is not at start of per-cpu area");
#endif

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
        "kexec control code size is too big")
#endif