/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

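/* The Blackfin toolchain prefixes C symbols with an underscore, so add
 * one to every symbol referenced by the generic linker script macros.
 */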
#define VMLINUX_SYMBOL(_sym_) _##_sym_

#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
#include <asm/thread_info.h>

OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
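/* jiffies is simply the lower 32 bits of jiffies_64 (little endian) */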
_jiffies = _jiffies_64;

SECTIONS
{
	. = CONFIG_BOOT_LOAD;
	/* Neither the text, ro_data, nor bss sections need to be aligned,
	 * so pack them back to back.
	 */
	.text :
	{
		__text = .;
		_text = .;
		__stext = .;
		TEXT_TEXT
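		/* Scheduler hot paths normally live in regular .text; with
		 * CONFIG_SCHEDULE_L1 they go into on-chip L1 instruction SRAM
		 * instead (see .text_l1 below).
		 */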
#ifndef CONFIG_SCHEDULE_L1
		SCHED_TEXT
#endif
		LOCK_TEXT
		IRQENTRY_TEXT
		KPROBES_TEXT
		*(.text.*)
		*(.fixup)

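		/* Parts without L1 instruction SRAM keep the .l1.text input
		 * sections in external-memory .text instead.
		 */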
#if !L1_CODE_LENGTH
		*(.l1.text)
#endif

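		/* Exception table consulted by the fault fixup code. */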
		. = ALIGN(16);
		___start___ex_table = .;
		*(__ex_table)
		___stop___ex_table = .;

		__etext = .;
	}

	NOTES

	/* Just in case the first read-only access is 32 bits wide */
	RO_DATA(4)

	.bss :
	{
		. = ALIGN(4);
		___bss_start = .;
		*(.bss .bss.*)
		*(COMMON)
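		/* Parts without L1 data SRAM fold the L1 bss input sections
		 * into the ordinary bss.
		 */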
#if !L1_DATA_A_LENGTH
		*(.l1.bss)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.bss.B)
#endif
		. = ALIGN(4);
		___bss_stop = .;
	}

	.data :
	{
		__sdata = .;
		/* This gets done first, so the glob doesn't suck it in */
		CACHELINE_ALIGNED_DATA(32)

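		/* Likewise, parts without L1/L2 data SRAM keep those input
		 * sections in external-memory .data.
		 */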
#if !L1_DATA_A_LENGTH
		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)
		*(.l1.data)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.data.B)
#endif
#if !L2_LENGTH
		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)
		*(.l2.data)
#endif

		DATA_DATA
		CONSTRUCTORS

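		/* Initial task data (including the init stack) must be
		 * THREAD_SIZE aligned.
		 */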
		INIT_TASK_DATA(THREAD_SIZE)

		__edata = .;
	}

	/* The init section should be last, so when we free it, it goes into
	 * the general memory pool, and (hopefully) will decrease fragmentation
	 * a tiny bit. The init section has a _requirement_ that it be
	 * PAGE_SIZE aligned.
	 */
	. = ALIGN(PAGE_SIZE);
	___init_begin = .;

	INIT_TEXT_SECTION(PAGE_SIZE)
	. = ALIGN(16);
	INIT_DATA_SECTION(16)
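	/* Per-cpu data, aligned to 4 bytes. */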
	PERCPU(4)

	/* We have to discard exit text and such at runtime, not link time, to
	 * handle embedded cross-section references (alt instructions, bug
	 * table, eh_frame, etc...)
	 */
	.exit.text :
	{
		EXIT_TEXT
	}
	.exit.data :
	{
		EXIT_DATA
	}

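	/* The L1/L2 sections below are linked at their on-chip SRAM
	 * addresses, but their load images are placed here in the init
	 * region so early boot code can copy them into SRAM; the space is
	 * then released along with the rest of init memory.
	 */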
	__l1_lma_start = .;

	.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
	{
		. = ALIGN(4);
		__stext_l1 = .;
		*(.l1.text)
#ifdef CONFIG_SCHEDULE_L1
		SCHED_TEXT
#endif
		. = ALIGN(4);
		__etext_l1 = .;
	}
	ASSERT (SIZEOF(.text_l1) <= L1_CODE_LENGTH, "L1 text overflow!")

	.data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1))
	{
		. = ALIGN(4);
		__sdata_l1 = .;
		*(.l1.data)
		__edata_l1 = .;

		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l1 = .;
		*(.l1.bss)
		. = ALIGN(4);
		__ebss_l1 = .;
	}
	ASSERT (SIZEOF(.data_l1) <= L1_DATA_A_LENGTH, "L1 data A overflow!")

	.data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
	{
		. = ALIGN(4);
		__sdata_b_l1 = .;
		*(.l1.data.B)
		__edata_b_l1 = .;

		. = ALIGN(4);
		__sbss_b_l1 = .;
		*(.l1.bss.B)
		. = ALIGN(4);
		__ebss_b_l1 = .;
	}
	ASSERT (SIZEOF(.data_b_l1) <= L1_DATA_B_LENGTH, "L1 data B overflow!")

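	/* L2 SRAM is a single region, so code, data, and bss share one section. */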
	__l2_lma_start = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);

	.text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1))
	{
		. = ALIGN(4);
		__stext_l2 = .;
		*(.l2.text)
		. = ALIGN(4);
		__etext_l2 = .;

		. = ALIGN(4);
		__sdata_l2 = .;
		*(.l2.data)
		__edata_l2 = .;

		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l2 = .;
		*(.l2.bss)
		. = ALIGN(4);
		__ebss_l2 = .;
	}
	ASSERT (SIZEOF(.text_data_l2) <= L2_LENGTH, "L2 overflow!")

	/* Force trailing alignment of our init section so that when we
	 * free our init memory, we don't leave behind a partial page.
	 */
	. = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2);
	. = ALIGN(PAGE_SIZE);
	___init_end = .;

	__end = .;

	STABS_DEBUG

	DWARF_DEBUG

	DISCARDS
}