/* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
 *
 * arch/sh/kernel/head.S
 *
 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Head.S contains the SH exception handlers and startup code.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <cpu/mmu_context.h>

#ifdef CONFIG_CPU_SH4A
#define SYNCO()		synco

#define PREFI(label, reg)	\
	mov.l	label, reg;	\
	prefi	@reg
#else
#define SYNCO()
#define PREFI(label, reg)
#endif

	.section	.empty_zero_page, "aw"
ENTRY(empty_zero_page)
	.long	1		/* MOUNT_ROOT_RDONLY */
	.long	0		/* RAMDISK_FLAGS */
	.long	0x0200		/* ORIG_ROOT_DEV */
	.long	1		/* LOADER_TYPE */
	.long	0x00000000	/* INITRD_START */
	.long	0x00000000	/* INITRD_SIZE */
#ifdef CONFIG_32BIT
	.long	0x53453f00 + 32	/* "SE?" = 32 bit */
#else
	.long	0x53453f00 + 29	/* "SE?" = 29 bit */
#endif
1:
	.skip	PAGE_SIZE - empty_zero_page - 1b

	__HEAD

/*
 * Condition at the entry of _stext:
 *
 *   BSC has already been initialized.
 *   INTC may or may not be initialized.
 *   VBR may or may not be initialized.
 *   MMU may or may not be initialized.
 *   Cache may or may not be initialized.
 *   Hardware (including on-chip modules) may or may not be initialized.
 *
 */
ENTRY(_stext)
	!			Initialize Status Register
	mov.l	1f, r0		! MD=1, RB=0, BL=0, IMASK=0xF
	ldc	r0, sr
	!			Initialize global interrupt mask
#ifdef CONFIG_CPU_HAS_SR_RB
	mov	#0, r0
	ldc	r0, r6_bank
#endif

	/*
	 * Prefetch if possible to reduce cache miss penalty.
	 *
	 * We do this early on for SH-4A as a micro-optimization,
	 * as later on we will have speculative execution enabled
	 * and this will become less of an issue.
	 */
	PREFI(5f, r0)
	PREFI(6f, r0)

	!
	mov.l	2f, r0
	mov	r0, r15		! Set initial r15 (stack pointer)
#ifdef CONFIG_CPU_HAS_SR_RB
	mov.l	7f, r0
	ldc	r0, r7_bank	! ... and initial thread_info
#endif

#ifdef CONFIG_PMB
/*
 * Reconfigure the initial PMB mappings set up by the hardware.
 *
 * When we boot in 32-bit MMU mode there are 2 PMB entries already
 * set up for us.
 *
 * Entry	VPN		PPN		V	SZ	C	UB	WT
 * ---------------------------------------------------------------
 *   0		0x80000000	0x00000000	1	512MB	1	0	1
 *   1		0xA0000000	0x00000000	1	512MB	0	0	0
 *
 * But we reprogram them here because we want complete control over
 * our address space and the initial mappings may not map PAGE_OFFSET
 * to __MEMORY_START (or even map all of our RAM).
 *
 * Once we've set up cached and uncached mappings we clear the rest of the
 * PMB entries. This clearing also deals with the fact that PMB entries
 * can persist across reboots. The PMB could have been left in any state
 * when the reboot occurred, so to be safe we clear all entries and start
 * with a clean slate.
 *
 * The uncached mapping is constructed using the smallest possible
 * mapping with a single unbufferable page. Only the kernel text needs to
 * be covered via the uncached mapping so that certain functions can be
 * run uncached.
 *
 * Drivers and the like that have previously abused the 1:1 identity
 * mapping are unsupported in 32-bit mode and must specify their caching
 * preference when page tables are constructed.
 *
 * This frees up the P2 space for more nefarious purposes.
 *
 * Register utilization is as follows:
 *
 *	r0 = PMB_DATA data field
 *	r1 = PMB_DATA address field
 *	r2 = PMB_ADDR data field
 *	r3 = PMB_ADDR address field
 *	r4 = PMB_E_SHIFT
 *	r5 = remaining amount of RAM to map
 *	r6 = PMB mapping size we're trying to use
 *	r7 = cached_to_uncached
 *	r8 = scratch register
 *	r9 = scratch register
 *	r10 = number of PMB entries we've set up
 */
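/*
 * Purely illustrative example (not taken from the original source): assuming
 * __MEMORY_SIZE is 128MB, CONFIG_UNCACHED_MAPPING is enabled, and the
 * bootloader has not already established a matching __MEMORY_START mapping,
 * the code below would leave the PMB looking roughly like this:
 *
 * Entry	VPN			PPN		V	SZ	C	UB
 *   0		PAGE_OFFSET + 128MB	__MEMORY_START	1	16MB	0	1	(uncached)
 *   1		PAGE_OFFSET		__MEMORY_START	1	128MB	1	0	(cached)
 *   2..15	invalidated by the clearing loop at the end
 */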

	mov.l	.LMMUCR, r1	/* Flush the TLB */
	mov.l	@r1, r0
	or	#MMUCR_TI, r0
	mov.l	r0, @r1

	mov.l	.LMEMORY_SIZE, r5

	mov	#PMB_E_SHIFT, r0
	mov	#0x1, r4
	shld	r0, r4

	mov.l	.LFIRST_DATA_ENTRY, r0
	mov.l	.LPMB_DATA, r1
	mov.l	.LFIRST_ADDR_ENTRY, r2
	mov.l	.LPMB_ADDR, r3

	/*
	 * First we need to walk the PMB and figure out if there are any
	 * existing mappings that match the initial mappings' VPN/PPN.
	 * If these have already been established by the bootloader, we
	 * don't bother setting up new entries here, and let the late PMB
	 * initialization take care of things instead.
	 *
	 * Note that we may need to coalesce and merge entries in order
	 * to reclaim more available PMB slots, which is much more than
	 * we want to do at this early stage.
	 */
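	/*
	 * Illustrative sketch only (C-like pseudocode, not part of the
	 * original source): the walk below is roughly
	 *
	 *	for (entry = 0; entry < NR_PMB_ENTRIES; entry++)
	 *		if ((pmb_data(entry) & (__MEMORY_START | PMB_V)) ==
	 *					(__MEMORY_START | PMB_V))
	 *			goto pmb_done;
	 *
	 * where pmb_data(entry) stands for the PMB_DATA word read at a
	 * stride of (1 << PMB_E_SHIFT) per entry.
	 */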
	mov	#0, r10
	mov	#NR_PMB_ENTRIES, r9

	mov	r1, r7		/* temporary PMB_DATA iter */

.Lvalidate_existing_mappings:

	mov.l	@r7, r8
	and	r0, r8
	cmp/eq	r0, r8		/* Check for valid __MEMORY_START mappings */
	bt	.Lpmb_done

	add	#1, r10		/* Increment the loop counter */
	cmp/eq	r9, r10
	bf/s	.Lvalidate_existing_mappings
	 add	r4, r7		/* Increment to the next PMB_DATA entry */

	/*
	 * If we've fallen through, continue with setting up the initial
	 * mappings.
	 */

	mov	r5, r7		/* cached_to_uncached */
	mov	#0, r10

#ifdef CONFIG_UNCACHED_MAPPING
	/*
	 * Uncached mapping
	 */
	mov	#(PMB_SZ_16M >> 2), r9
	shll2	r9

	mov	#(PMB_UB >> 8), r8
	shll8	r8

	or	r0, r8
	or	r9, r8
	mov.l	r8, @r1
	mov	r2, r8
	add	r7, r8
	mov.l	r8, @r3

	add	r4, r1
	add	r4, r3
	add	#1, r10
#endif

/*
 * Iterate over all of the available sizes from largest to
 * smallest for constructing the cached mapping.
 */
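/*
 * A minimal sketch of what one expansion of the macro below does (C-like
 * pseudocode, illustrative only and not part of the original source), with
 * pmb_data()/pmb_addr() standing for the PMB_DATA and PMB_ADDR words at a
 * stride of (1 << PMB_E_SHIFT) per entry:
 *
 *	while (remaining >= size) {
 *		pmb_data(entry) = ppn | PMB_V | PMB_SZ_<size>M | PMB_C;
 *		pmb_addr(entry) = vpn | PMB_V;
 *		vpn += size; ppn += size;
 *		remaining -= size; entry++;
 *	}
 */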
#define __PMB_ITER_BY_SIZE(size)			\
.L##size:						\
	mov	#(size >> 4), r6;			\
	shll16	r6;					\
	shll8	r6;					\
							\
	cmp/hi	r5, r6;					\
	bt	9999f;					\
							\
	mov	#(PMB_SZ_##size##M >> 2), r9;		\
	shll2	r9;					\
							\
	/*						\
	 * Cached mapping				\
	 */						\
	mov	#PMB_C, r8;				\
	or	r0, r8;					\
	or	r9, r8;					\
	mov.l	r8, @r1;				\
	mov.l	r2, @r3;				\
							\
	/* Increment to the next PMB_DATA entry */	\
	add	r4, r1;					\
	/* Increment to the next PMB_ADDR entry */	\
	add	r4, r3;					\
	/* Increment number of PMB entries */		\
	add	#1, r10;				\
							\
	sub	r6, r5;					\
	add	r6, r0;					\
	add	r6, r2;					\
							\
	bra	.L##size;				\
9999:

	__PMB_ITER_BY_SIZE(512)
	__PMB_ITER_BY_SIZE(128)
	__PMB_ITER_BY_SIZE(64)
	__PMB_ITER_BY_SIZE(16)

#ifdef CONFIG_UNCACHED_MAPPING
	/*
	 * Now that we can access it, update cached_to_uncached and
	 * uncached_size.
	 */
	mov.l	.Lcached_to_uncached, r0
	mov.l	r7, @r0

	mov.l	.Luncached_size, r0
	mov	#1, r7
	shll16	r7
	shll8	r7
	mov.l	r7, @r0
#endif

	/*
	 * Clear the remaining PMB entries.
	 *
	 * r3 = entry to begin clearing from
	 * r10 = number of entries we've set up so far
	 */
	mov	#0, r1
	mov	#NR_PMB_ENTRIES, r0

.Lagain:
	mov.l	r1, @r3		/* Clear PMB_ADDR entry */
	add	#1, r10		/* Increment the loop counter */
	cmp/eq	r0, r10
	bf/s	.Lagain
	 add	r4, r3		/* Increment to the next PMB_ADDR entry */

	mov.l	6f, r0
	icbi	@r0

.Lpmb_done:
#endif /* CONFIG_PMB */

#ifndef CONFIG_SH_NO_BSS_INIT
	/*
	 * Don't clear BSS if running on slow platforms such as an RTL simulation,
	 * remote memory via SHdebug link, etc. For these the memory can be guaranteed
	 * to be all zero on boot anyway.
	 */
				! Clear BSS area
#ifdef CONFIG_SMP
	mov.l	3f, r0
	cmp/eq	#0, r0		! skip clear if set to zero
	bt	10f
#endif

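	/*
	 * Illustrative only (not part of the original source): in intent,
	 * the loop below is simply
	 *
	 *	memset(__bss_start, 0, _end - __bss_start);
	 *
	 * The "add #4" compensates for the store in the bf/s delay slot,
	 * which still executes on the final, failed loop test.
	 */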
	mov.l	3f, r1
	add	#4, r1
	mov.l	4f, r2
	mov	#0, r0
9:	cmp/hs	r2, r1
	bf/s	9b		! while (r1 < r2)
	 mov.l	r0,@-r2

10:
#endif

	!			Additional CPU initialization
	mov.l	6f, r0
	jsr	@r0
	 nop

	SYNCO()			! Wait for pending instructions..

	!			Start kernel
	mov.l	5f, r0
	jmp	@r0
	 nop

	.balign 4
#if defined(CONFIG_CPU_SH2)
1:	.long	0x000000F0	! IMASK=0xF
#else
1:	.long	0x400080F0	! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
#endif
ENTRY(stack_start)
2:	.long	init_thread_union+THREAD_SIZE
3:	.long	__bss_start
4:	.long	_end
5:	.long	start_kernel
6:	.long	cpu_init
7:	.long	init_thread_union

#ifdef CONFIG_PMB
.LPMB_ADDR:		.long	PMB_ADDR
.LPMB_DATA:		.long	PMB_DATA
.LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
.LFIRST_DATA_ENTRY:	.long	__MEMORY_START | PMB_V
.LMMUCR:		.long	MMUCR
.LMEMORY_SIZE:		.long	__MEMORY_SIZE
#ifdef CONFIG_UNCACHED_MAPPING
.Lcached_to_uncached:	.long	cached_to_uncached
.Luncached_size:	.long	uncached_size
#endif
#endif