/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation and unification done by Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If the symbol value should
 * change when the kernel is relocated, make the symbol section relative and
 * put it inside the section definition.
 */
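
/*
 * Illustration of the rule above (a minimal sketch, not part of the layout;
 * the ".example" section and "_marker" symbol are hypothetical names):
 *
 *	_marker = .;			at the top level of SECTIONS this
 *					defines an absolute symbol, which is
 *					NOT adjusted if the kernel relocates
 *
 *	.example : { _marker = .; }	inside an output section the symbol is
 *					section relative and moves with the
 *					section when the kernel relocates
 */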

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386	/* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif

PHDRS {
	text PT_LOAD FLAGS(5);		/* R_E */
	data PT_LOAD FLAGS(7);		/* RWE */
#ifdef CONFIG_X86_64
	user PT_LOAD FLAGS(7);		/* RWE */
	data.init PT_LOAD FLAGS(7);	/* RWE */
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(7);	/* RWE */
#endif
	data.init2 PT_LOAD FLAGS(7);	/* RWE */
#endif
	note PT_NOTE FLAGS(0);		/* ___ */
}
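
/*
 * Note on FLAGS(): the argument is the raw ELF p_flags value, where
 * PF_X = 1, PF_W = 2 and PF_R = 4.  So FLAGS(5) marks a read+execute
 * segment, FLAGS(7) read+write+execute, and FLAGS(0) no permissions.
 */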

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
	. = __START_KERNEL;
	phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif

	/* Text and read-only data */

	/* bootstrapping code */
	.text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
		_text = .;
		*(.text.head)
	} :text = 0x9090

	/* The rest of the text */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_X86_32
		/* not really needed, already page aligned */
		. = ALIGN(PAGE_SIZE);
		*(.text.page_aligned)
#endif
		. = ALIGN(8);
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090

	NOTES :text :note

	/* Exception table */
	. = ALIGN(16);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
		*(__ex_table)
		__stop___ex_table = .;
	} :text = 0x9090

	RODATA

	/* Data */
	. = ALIGN(PAGE_SIZE);
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		CONSTRUCTORS

#ifdef CONFIG_X86_64
		/* End of data section */
		_edata = .;
#endif
	} :data

#ifdef CONFIG_X86_32
	/* 32 bit has nosave before _edata */
	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		__nosave_begin = .;
		*(.data.nosave)
		. = ALIGN(PAGE_SIZE);
		__nosave_end = .;
	}
#endif

	. = ALIGN(PAGE_SIZE);
	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
		*(.data.page_aligned)
		*(.data.idt)
	}

#ifdef CONFIG_X86_32
	. = ALIGN(32);
#else
	. = ALIGN(PAGE_SIZE);
	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
#endif
	.data.cacheline_aligned :
		AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
		*(.data.cacheline_aligned)
	}

	/* rarely changed data like cpu maps */
#ifdef CONFIG_X86_32
	. = ALIGN(32);
#else
	. = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
#endif
	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
		*(.data.read_mostly)

#ifdef CONFIG_X86_32
		/* End of data section */
		_edata = .;
#endif
	}

#ifdef CONFIG_X86_64

#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \
			SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \
			SIZEOF(.data.read_mostly) + 4095) & ~(4095))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)

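/*
 * Worked example of the address math above: VSYSCALL_ADDR is the fixed
 * vsyscall virtual address (-10 MB, i.e. 0xffffffffff600000), and
 * VSYSCALL_PHYS_ADDR rounds the end of .data.read_mostly's load address up
 * to the next 4 KB boundary, so the vsyscall page is loaded right after it.
 * For a section x placed at the fixed virtual address,
 * VLOAD(x) = VSYSCALL_PHYS_ADDR + (ADDR(x) - VSYSCALL_ADDR) is its place in
 * the loaded image, and VVIRT(x) is the matching address relative to
 * VSYSCALL_VIRT_ADDR in the normal kernel mapping.
 */
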
	. = VSYSCALL_ADDR;
	.vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) {
		*(.vsyscall_0)
	} :user

	__vsyscall_0 = VSYSCALL_VIRT_ADDR;

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
		*(.vsyscall_fn)
	}

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
		*(.vsyscall_gtod_data)
	}

	vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
	.vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
		*(.vsyscall_clock)
	}
	vsyscall_clock = VVIRT(.vsyscall_clock);

	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
		*(.vsyscall_1)
	}
	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
		*(.vsyscall_2)
	}

	.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
		*(.vgetcpu_mode)
	}
	vgetcpu_mode = VVIRT(.vgetcpu_mode);

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.jiffies : AT(VLOAD(.jiffies)) {
		*(.jiffies)
	}
	jiffies = VVIRT(.jiffies);

	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
		*(.vsyscall_3)
	}

	. = VSYSCALL_VIRT_ADDR + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

#endif /* CONFIG_X86_64 */

	/* init_task */
	. = ALIGN(THREAD_SIZE);
	.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
		*(.data.init_task)
	}
#ifdef CONFIG_X86_64
	 :data.init
#endif

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		__smp_locks_end = .;
		. = ALIGN(PAGE_SIZE);
	}

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	__init_begin = .; /* paired with __init_end */
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		_sinittext = .;
		INIT_TEXT
		_einittext = .;
	}

	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
	}

	. = ALIGN(16);
	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
		__setup_start = .;
		*(.init.setup)
		__setup_end = .;
	}
	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
		__initcall_start = .;
		INITCALLS
		__initcall_end = .;
	}

	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
		__con_initcall_start = .;
		*(.con_initcall.init)
		__con_initcall_end = .;
	}

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

	SECURITY_INIT

	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	/*
	 * .exit.text is discarded at runtime, not at link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#ifdef CONFIG_BLK_DEV_INITRD
	. = ALIGN(PAGE_SIZE);
	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
		__initramfs_start = .;
		*(.init.ramfs)
		__initramfs_end = .;
	}
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - __data_nosave - should
	 * start another section data.init2.  Also, pda should be at the head of
	 * percpu area.  Preallocate it and define the percpu offset symbol
	 * so that it can be accessed as a percpu variable.
	 */
	. = ALIGN(PAGE_SIZE);
	PERCPU_VADDR(0, :percpu)
#else
	PERCPU(PAGE_SIZE)
#endif
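
	/*
	 * Rough sketch of the two variants above (the authoritative
	 * definitions live in include/asm-generic/vmlinux.lds.h):
	 * PERCPU_VADDR(0, :percpu) emits the per-cpu output section with a
	 * virtual address of 0 but a separate load address, bracketed by
	 * __per_cpu_start/__per_cpu_end with __per_cpu_load at the load
	 * address, and assigns it to the :percpu program header.
	 * PERCPU(PAGE_SIZE) is the simpler form that lays the per-cpu data
	 * out at the current location, page aligned.
	 */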

	. = ALIGN(PAGE_SIZE);
	/* freed after init ends here */
	__init_end = .;

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		. = ALIGN(PAGE_SIZE);
		__nosave_begin = .;
		*(.data.nosave)
		. = ALIGN(PAGE_SIZE);
		__nosave_end = .;
	} :data.init2
	/* use another section data.init2, see PERCPU_VADDR() above */
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss.page_aligned)
		*(.bss)
		. = ALIGN(4);
		__bss_stop = .;
	}

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}

	.end : AT(ADDR(.end) - LOAD_OFFSET) {
		_end = .;
	}

	/* Sections to be discarded */
	/DISCARD/ : {
		*(.exitcall.exit)
		*(.eh_frame)
		*(.discard)
	}

	STABS_DEBUG
	DWARF_DEBUG
}

#ifdef CONFIG_X86_32
ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	"kernel image bigger than KERNEL_IMAGE_SIZE")
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
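/*
 * For instance, "INIT_PER_CPU(gdt_page);" below expands to
 * "init_per_cpu__gdt_page = per_cpu__gdt_page + __per_cpu_load;".
 */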
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);

/*
 * Build-time check on the image size:
 */
ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	"kernel image bigger than KERNEL_IMAGE_SIZE")

#ifdef CONFIG_SMP
ASSERT((per_cpu__irq_stack_union == 0),
	"irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	"kexec control code size is too big")
#endif