/* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
 *
 *  arch/sh/kernel/head.S
 *
 *  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
 *  Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Head.S contains the SH exception handlers and startup code.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <cpu/mmu_context.h>

#ifdef CONFIG_CPU_SH4A
/* SH-4A provides synco (serialize data operations) and prefi (prefetch
 * into the instruction cache); expand to real instructions there. */
#define SYNCO()			synco

#define PREFI(label, reg)	\
	mov.l	label, reg;	\
	prefi	@reg
#else
/* Older cores lack these instructions; the macros expand to nothing. */
#define SYNCO()
#define PREFI(label, reg)
#endif

Linus Torvalds1da177e2005-04-16 15:20:36 -070031 .section .empty_zero_page, "aw"
32ENTRY(empty_zero_page)
33 .long 1 /* MOUNT_ROOT_RDONLY */
34 .long 0 /* RAMDISK_FLAGS */
35 .long 0x0200 /* ORIG_ROOT_DEV */
36 .long 1 /* LOADER_TYPE */
Paul Mundt972ad0e2008-05-13 17:41:46 +090037 .long 0x00000000 /* INITRD_START */
38 .long 0x00000000 /* INITRD_SIZE */
Paul Mundta0ab3662010-01-13 18:31:48 +090039#ifdef CONFIG_32BIT
Stuart Menefy7a2eacb2007-11-26 21:29:09 +090040 .long 0x53453f00 + 32 /* "SE?" = 32 bit */
41#else
42 .long 0x53453f00 + 29 /* "SE?" = 29 bit */
43#endif
Paul Mundte2dfb912006-12-12 08:53:29 +0900441:
45 .skip PAGE_SIZE - empty_zero_page - 1b
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
Tim Abbottbbe215c2009-04-25 22:11:07 -040047 __HEAD
Paul Mundt339547b2007-07-20 17:40:03 +090048
Linus Torvalds1da177e2005-04-16 15:20:36 -070049/*
50 * Condition at the entry of _stext:
51 *
52 * BSC has already been initialized.
53 * INTC may or may not be initialized.
54 * VBR may or may not be initialized.
55 * MMU may or may not be initialized.
56 * Cache may or may not be initialized.
57 * Hardware (including on-chip modules) may or may not be initialized.
58 *
59 */
60ENTRY(_stext)
61 ! Initialize Status Register
62 mov.l 1f, r0 ! MD=1, RB=0, BL=0, IMASK=0xF
63 ldc r0, sr
64 ! Initialize global interrupt mask
Yoshinori Satode398402006-11-05 16:15:19 +090065#ifdef CONFIG_CPU_HAS_SR_RB
Paul Mundtaba10302007-09-21 18:32:32 +090066 mov #0, r0
Linus Torvalds1da177e2005-04-16 15:20:36 -070067 ldc r0, r6_bank
Yoshinori Satode398402006-11-05 16:15:19 +090068#endif
69
Paul Mundtb7e108e2006-09-27 15:00:04 +090070 /*
71 * Prefetch if possible to reduce cache miss penalty.
72 *
73 * We do this early on for SH-4A as a micro-optimization,
74 * as later on we will have speculative execution enabled
75 * and this will become less of an issue.
76 */
77 PREFI(5f, r0)
78 PREFI(6f, r0)
79
Linus Torvalds1da177e2005-04-16 15:20:36 -070080 !
81 mov.l 2f, r0
82 mov r0, r15 ! Set initial r15 (stack pointer)
Yoshinori Satode398402006-11-05 16:15:19 +090083#ifdef CONFIG_CPU_HAS_SR_RB
Paul Mundtaba10302007-09-21 18:32:32 +090084 mov.l 7f, r0
Linus Torvalds1da177e2005-04-16 15:20:36 -070085 ldc r0, r7_bank ! ... and initial thread_info
Yoshinori Satode398402006-11-05 16:15:19 +090086#endif
Paul Mundt740a3e62008-12-17 15:33:43 +090087
Matt Fleming3d467672010-01-18 19:33:10 +090088#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
Paul Mundt77c20192010-01-21 14:19:41 +090089
90#define __PMB_ITER_BY_SIZE(size) \
91.L##size: \
92 mov #(size >> 4), r6; \
93 shll16 r6; \
94 shll8 r6; \
95 \
96 cmp/hi r5, r6; \
97 bt 9999f; \
98 \
99 mov #(PMB_SZ_##size##M >> 2), r9; \
100 shll2 r9; \
101 \
102 /* \
103 * Cached mapping \
104 */ \
105 mov #PMB_C, r8; \
106 or r0, r8; \
107 or r9, r8; \
108 mov.l r8, @r1; \
109 mov.l r2, @r3; \
110 \
111 /* Increment to the next PMB_DATA entry */ \
112 add r4, r1; \
113 /* Increment to the next PMB_ADDR entry */ \
114 add r4, r3; \
115 /* Increment number of PMB entries */ \
116 add #1, r10; \
117 \
118 /* \
119 * Uncached mapping \
120 */ \
121 mov #(PMB_UB >> 8), r8; \
122 shll8 r8; \
123 \
124 or r0, r8; \
125 or r9, r8; \
126 mov.l r8, @r1; \
127 mov r2, r8; \
128 add r7, r8; \
129 mov.l r8, @r3; \
130 \
131 /* Increment to the next PMB_DATA entry */ \
132 add r4, r1; \
133 /* Increment to the next PMB_ADDR entry */ \
134 add r4, r3; \
135 /* Increment number of PMB entries */ \
136 add #1, r10; \
137 \
138 sub r6, r5; \
139 add r6, r0; \
140 add r6, r2; \
141 \
142 bra .L##size; \
1439999:
144
Matt Fleming3d467672010-01-18 19:33:10 +0900145 /*
146 * Reconfigure the initial PMB mappings setup by the hardware.
147 *
148 * When we boot in 32-bit MMU mode there are 2 PMB entries already
149 * setup for us.
150 *
151 * Entry VPN PPN V SZ C UB WT
152 * ---------------------------------------------------------------
153 * 0 0x80000000 0x00000000 1 512MB 1 0 1
154 * 1 0xA0000000 0x00000000 1 512MB 0 0 0
155 *
156 * But we reprogram them here because we want complete control over
157 * our address space and the initial mappings may not map PAGE_OFFSET
158 * to __MEMORY_START (or even map all of our RAM).
159 *
160 * Once we've setup cached and uncached mappings for all of RAM we
161 * clear the rest of the PMB entries.
162 *
163 * This clearing also deals with the fact that PMB entries can persist
164 * across reboots. The PMB could have been left in any state when the
165 * reboot occurred, so to be safe we clear all entries and start with
166 * with a clean slate.
167 */
168
169 mov.l .LMMUCR, r1 /* Flush the TLB */
170 mov.l @r1, r0
171 or #MMUCR_TI, r0
172 mov.l r0, @r1
173
174 mov.l .LMEMORY_SIZE, r5
175 mov r5, r7
176
177 mov #PMB_E_SHIFT, r0
178 mov #0x1, r4
179 shld r0, r4
180
181 mov.l .LFIRST_DATA_ENTRY, r0
182 mov.l .LPMB_DATA, r1
183 mov.l .LFIRST_ADDR_ENTRY, r2
184 mov.l .LPMB_ADDR, r3
185
186 mov #0, r10
187
188 /*
189 * r0 = PMB_DATA data field
190 * r1 = PMB_DATA address field
191 * r2 = PMB_ADDR data field
192 * r3 = PMB_ADDR address field
193 * r4 = PMB_E_SHIFT
194 * r5 = remaining amount of RAM to map
195 * r6 = PMB mapping size we're trying to use
196 * r7 = cached_to_uncached
197 * r8 = scratch register
198 * r9 = scratch register
199 * r10 = number of PMB entries we've setup
200 */
Paul Mundt77c20192010-01-21 14:19:41 +0900201 __PMB_ITER_BY_SIZE(512)
202 __PMB_ITER_BY_SIZE(128)
203 __PMB_ITER_BY_SIZE(64)
204 __PMB_ITER_BY_SIZE(16)
Matt Fleming3d467672010-01-18 19:33:10 +0900205
Matt Fleming3d467672010-01-18 19:33:10 +0900206 /* Update cached_to_uncached */
207 mov.l .Lcached_to_uncached, r0
208 mov.l r7, @r0
209
210 /*
211 * Clear the remaining PMB entries.
212 *
213 * r3 = entry to begin clearing from
214 * r10 = number of entries we've setup so far
215 */
216 mov #0, r1
217 mov #PMB_ENTRY_MAX, r0
218
219.Lagain:
220 mov.l r1, @r3 /* Clear PMB_ADDR entry */
221 add #1, r10 /* Increment the loop counter */
222 cmp/eq r0, r10
223 bf/s .Lagain
224 add r4, r3 /* Increment to the next PMB_ADDR entry */
225
226 mov.l 6f, r0
227 icbi @r0
228
229#endif /* !CONFIG_PMB_LEGACY */
230
Paul Mundt740a3e62008-12-17 15:33:43 +0900231#ifndef CONFIG_SH_NO_BSS_INIT
232 /*
233 * Don't clear BSS if running on slow platforms such as an RTL simulation,
234 * remote memory via SHdebug link, etc. For these the memory can be guaranteed
235 * to be all zero on boot anyway.
236 */
237 ! Clear BSS area
Paul Mundtaba10302007-09-21 18:32:32 +0900238#ifdef CONFIG_SMP
239 mov.l 3f, r0
240 cmp/eq #0, r0 ! skip clear if set to zero
241 bt 10f
242#endif
243
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244 mov.l 3f, r1
245 add #4, r1
246 mov.l 4f, r2
247 mov #0, r0
2489: cmp/hs r2, r1
249 bf/s 9b ! while (r1 < r2)
250 mov.l r0,@-r2
Paul Mundtb7e108e2006-09-27 15:00:04 +0900251
Paul Mundtaba10302007-09-21 18:32:32 +090025210:
Paul Mundt740a3e62008-12-17 15:33:43 +0900253#endif
254
Paul Mundtb7e108e2006-09-27 15:00:04 +0900255 ! Additional CPU initialization
256 mov.l 6f, r0
257 jsr @r0
258 nop
259
260 SYNCO() ! Wait for pending instructions..
Paul Mundtaba10302007-09-21 18:32:32 +0900261
Linus Torvalds1da177e2005-04-16 15:20:36 -0700262 ! Start kernel
263 mov.l 5f, r0
264 jmp @r0
265 nop
266
267 .balign 4
Yoshinori Satode398402006-11-05 16:15:19 +0900268#if defined(CONFIG_CPU_SH2)
2691: .long 0x000000F0 ! IMASK=0xF
270#else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711: .long 0x400080F0 ! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
Yoshinori Satode398402006-11-05 16:15:19 +0900272#endif
Paul Mundtaba10302007-09-21 18:32:32 +0900273ENTRY(stack_start)
Paul Mundtd153ea82006-09-27 18:20:16 +09002742: .long init_thread_union+THREAD_SIZE
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753: .long __bss_start
2764: .long _end
2775: .long start_kernel
2786: .long sh_cpu_init
Paul Mundtaba10302007-09-21 18:32:32 +09002797: .long init_thread_union
Matt Fleming3d467672010-01-18 19:33:10 +0900280
281#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
282.LPMB_ADDR: .long PMB_ADDR
283.LPMB_DATA: .long PMB_DATA
284.LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V
285.LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V
286.LMMUCR: .long MMUCR
287.Lcached_to_uncached: .long cached_to_uncached
288.LMEMORY_SIZE: .long __MEMORY_SIZE
289#endif