/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation and unification done by Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that symbol
 * value should remain constant even if kernel image is relocated
 * at run time. Absolute symbols are not relocated. If symbol value should
 * change if kernel is relocated, make the symbol section relative and
 * put it inside the section definition.
 */

/*
 * LOAD_OFFSET is the difference between a section's virtual address and
 * its physical load address; every AT(ADDR(x) - LOAD_OFFSET) below uses it.
 */
#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386	/* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
/* On 32-bit, jiffies is an alias for the low word of jiffies_64. */
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
/* On 64-bit, jiffies is placed in the .jiffies vsyscall section below;
 * jiffies_64 aliases it. */
jiffies_64 = jiffies;
#endif

/*
 * ELF program headers (segments). FLAGS bits: 4 = R, 2 = W, 1 = X,
 * so FLAGS(5) is read+execute and FLAGS(7) is read+write+execute.
 */
PHDRS {
	text PT_LOAD FLAGS(5);		/* R_E */
	data PT_LOAD FLAGS(7);		/* RWE */
#ifdef CONFIG_X86_64
	user PT_LOAD FLAGS(7);		/* RWE */
	data.init PT_LOAD FLAGS(7);	/* RWE */
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(7);	/* RWE */
#endif
	data.init2 PT_LOAD FLAGS(7);	/* RWE */
#endif
	note PT_NOTE FLAGS(0);		/* ___ */
}

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
	. = __START_KERNEL;
	phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif

	/* Text and read-only data */

	/* bootstrapping code */
	.text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
		_text = .;
		*(.text.head)
	} :text = 0x9090

	/* The rest of the text */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_X86_32
		/* not really needed, already page aligned */
		. = ALIGN(PAGE_SIZE);
		*(.text.page_aligned)
#endif
		. = ALIGN(8);
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090	/* gap fill is 0x90 = NOP */

	NOTES :text :note

	/* Exception table */
	. = ALIGN(16);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
		*(__ex_table)
		__stop___ex_table = .;
	} :text = 0x9090

	RODATA

	/* Data */
	. = ALIGN(PAGE_SIZE);
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		CONSTRUCTORS

#ifdef CONFIG_X86_64
		/* End of data section */
		_edata = .;
#endif
	} :data

#ifdef CONFIG_X86_32
	/* 32 bit has nosave before _edata */
	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		__nosave_begin = .;
		*(.data.nosave)
		. = ALIGN(PAGE_SIZE);
		__nosave_end = .;
	}
#endif

	. = ALIGN(PAGE_SIZE);
	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
		*(.data.page_aligned)
		*(.data.idt)
	}

#ifdef CONFIG_X86_32
	. = ALIGN(32);
#else
	. = ALIGN(PAGE_SIZE);
	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
#endif
	.data.cacheline_aligned :
		AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
		*(.data.cacheline_aligned)
	}

	/* rarely changed data like cpu maps */
#ifdef CONFIG_X86_32
	. = ALIGN(32);
#else
	. = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
#endif
	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
		*(.data.read_mostly)

#ifdef CONFIG_X86_32
		/* End of data section */
		_edata = .;
#endif
	}

#ifdef CONFIG_X86_64

	/*
	 * The vsyscall page lives at a fixed negative virtual address
	 * (-10 MB) but is loaded physically right after .data.read_mostly,
	 * rounded up to the next 4096-byte page. VLOAD()/VVIRT() translate
	 * section addresses into load/virtual space respectively.
	 */
#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \
			SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \
			SIZEOF(.data.read_mostly) + 4095) & ~(4095))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)

	. = VSYSCALL_ADDR;
	.vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) {
		*(.vsyscall_0)
	} :user

	__vsyscall_0 = VSYSCALL_VIRT_ADDR;

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
		*(.vsyscall_fn)
	}

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
		*(.vsyscall_gtod_data)
	}

	vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
	.vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
		*(.vsyscall_clock)
	}
	vsyscall_clock = VVIRT(.vsyscall_clock);


	/* Entry points 1-3 sit at fixed 1 KB offsets from vsyscall_0. */
	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
		*(.vsyscall_1)
	}
	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
		*(.vsyscall_2)
	}

	.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
		*(.vgetcpu_mode)
	}
	vgetcpu_mode = VVIRT(.vgetcpu_mode);

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.jiffies : AT(VLOAD(.jiffies)) {
		*(.jiffies)
	}
	jiffies = VVIRT(.jiffies);

	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
		*(.vsyscall_3)
	}

	/* Return the location counter to normal kernel virtual space. */
	. = VSYSCALL_VIRT_ADDR + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

#endif /* CONFIG_X86_64 */

	/* Remaining (not yet unified) sections come from the per-arch files. */
#ifdef CONFIG_X86_32
# include "vmlinux_32.lds.S"
#else
# include "vmlinux_64.lds.S"
#endif

	STABS_DEBUG
	DWARF_DEBUG
}


/* Build-time checks: fail the link if the image layout is wrong. */
#ifdef CONFIG_X86_32
ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
       "kernel image bigger than KERNEL_IMAGE_SIZE")
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);

/*
 * Build-time check on the image size:
 */
ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
       "kernel image bigger than KERNEL_IMAGE_SIZE")

#ifdef CONFIG_SMP
ASSERT((per_cpu__irq_stack_union == 0),
       "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
       "kexec control code size is too big")
#endif
