/* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
 *
 *  arch/sh/kernel/head.S
 *
 *  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
 *  Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Head.S contains the SH exception handlers and startup code.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <cpu/mmu_context.h>

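/*
 * SH-4A provides "synco" (wait for pending operations to complete) and
 * "prefi" (prefetch into the instruction cache); stub both out on CPUs
 * that lack them.
 */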
#ifdef CONFIG_CPU_SH4A
#define SYNCO()		synco

#define PREFI(label, reg)	\
	mov.l	label, reg;	\
	prefi	@reg
#else
#define SYNCO()
#define PREFI(label, reg)
#endif

	.section	.empty_zero_page, "aw"
ENTRY(empty_zero_page)
	.long	1		/* MOUNT_ROOT_RDONLY */
	.long	0		/* RAMDISK_FLAGS */
	.long	0x0200		/* ORIG_ROOT_DEV */
	.long	1		/* LOADER_TYPE */
	.long	0x00000000	/* INITRD_START */
	.long	0x00000000	/* INITRD_SIZE */
#ifdef CONFIG_32BIT
	.long	0x53453f00 + 32	/* "SE?" = 32 bit */
#else
	.long	0x53453f00 + 29	/* "SE?" = 29 bit */
#endif
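	/* Pad the boot parameter page out to a full PAGE_SIZE */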
1:
	.skip	PAGE_SIZE - empty_zero_page - 1b

	__HEAD

/*
 * Condition at the entry of _stext:
 *
 *   BSC has already been initialized.
 *   INTC may or may not be initialized.
 *   VBR may or may not be initialized.
 *   MMU may or may not be initialized.
 *   Cache may or may not be initialized.
 *   Hardware (including on-chip modules) may or may not be initialized.
 *
 */
ENTRY(_stext)
	!			Initialize Status Register
	mov.l	1f, r0		! MD=1, RB=0, BL=0, IMASK=0xF
	ldc	r0, sr
	!			Initialize global interrupt mask
#ifdef CONFIG_CPU_HAS_SR_RB
	mov	#0, r0
	ldc	r0, r6_bank
#endif

	/*
	 * Prefetch if possible to reduce cache miss penalty.
	 *
	 * We do this early on for SH-4A as a micro-optimization,
	 * as later on we will have speculative execution enabled
	 * and this will become less of an issue.
	 */
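	/*
	 * 5f and 6f hold the addresses of start_kernel and cpu_init, so
	 * this warms the instruction cache lines we will branch through
	 * further down.
	 */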
	PREFI(5f, r0)
	PREFI(6f, r0)

	!
	mov.l	2f, r0
	mov	r0, r15		! Set initial r15 (stack pointer)
#ifdef CONFIG_CPU_HAS_SR_RB
	mov.l	7f, r0
	ldc	r0, r7_bank	! ... and initial thread_info
#endif

#ifdef CONFIG_PMB
/*
 * Reconfigure the initial PMB mappings setup by the hardware.
 *
 * When we boot in 32-bit MMU mode there are 2 PMB entries already
 * setup for us.
 *
 * Entry       VPN	   PPN	    V	SZ	C	UB	WT
 * ---------------------------------------------------------------
 *   0	    0x80000000 0x00000000   1  512MB	1	0	1
 *   1	    0xA0000000 0x00000000   1  512MB	0	0	0
 *
 * But we reprogram them here because we want complete control over
 * our address space and the initial mappings may not map PAGE_OFFSET
 * to __MEMORY_START (or even map all of our RAM).
 *
 * Once we've setup cached and uncached mappings we clear the rest of the
 * PMB entries. This clearing also deals with the fact that PMB entries
 * can persist across reboots. The PMB could have been left in any state
 * when the reboot occurred, so to be safe we clear all entries and start
 * with a clean slate.
 *
 * The uncached mapping is constructed using the smallest possible
 * mapping with a single unbufferable page. Only the kernel text needs to
 * be covered via the uncached mapping so that certain functions can be
 * run uncached.
 *
 * Drivers and the like that have previously abused the 1:1 identity
 * mapping are unsupported in 32-bit mode and must specify their caching
 * preference when page tables are constructed.
 *
 * This frees up the P2 space for more nefarious purposes.
 *
 * Register utilization is as follows:
 *
 *	r0 = PMB_DATA data field
 *	r1 = PMB_DATA address field
 *	r2 = PMB_ADDR data field
 *	r3 = PMB_ADDR address field
 *	r4 = PMB_E_SHIFT
 *	r5 = remaining amount of RAM to map
 *	r6 = PMB mapping size we're trying to use
 *	r7 = cached_to_uncached
 *	r8 = scratch register
 *	r9 = scratch register
 *	r10 = number of PMB entries we've setup
 *	r11 = scratch register
 */

	mov.l	.LMMUCR, r1	/* Flush the TLB */
	mov.l	@r1, r0
	or	#MMUCR_TI, r0
	mov.l	r0, @r1

	mov.l	.LMEMORY_SIZE, r5

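	/*
	 * PMB entry registers are spaced (1 << PMB_E_SHIFT) bytes apart,
	 * so keep that stride in r4 for stepping through the PMB_DATA
	 * and PMB_ADDR arrays below.
	 */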
	mov	#PMB_E_SHIFT, r0
	mov	#0x1, r4
	shld	r0, r4

	mov.l	.LFIRST_DATA_ENTRY, r0
	mov.l	.LPMB_DATA, r1
	mov.l	.LFIRST_ADDR_ENTRY, r2
	mov.l	.LPMB_ADDR, r3

	/*
	 * First we need to walk the PMB and figure out if there are any
	 * existing mappings that match the initial mappings VPN/PPN.
	 * If these have already been established by the bootloader, we
	 * don't bother setting up new entries here, and let the late PMB
	 * initialization take care of things instead.
	 *
	 * Note that we may need to coalesce and merge entries in order
	 * to reclaim more available PMB slots, which is much more than
	 * we want to do at this early stage.
	 */
	mov	#0, r10
	mov	#NR_PMB_ENTRIES, r9

	mov	r1, r7		/* temporary PMB_DATA iter */

.Lvalidate_existing_mappings:

	mov.l	.LPMB_DATA_MASK, r11
	mov.l	@r7, r8
	and	r11, r8
	cmp/eq	r0, r8		/* Check for valid __MEMORY_START mappings */
	bt	.Lpmb_done

	add	#1, r10		/* Increment the loop counter */
	cmp/eq	r9, r10
	bf/s	.Lvalidate_existing_mappings
	add	r4, r7		/* Increment to the next PMB_DATA entry */

	/*
	 * If we've fallen through, continue with setting up the initial
	 * mappings.
	 */

	mov	r5, r7		/* cached_to_uncached */
	mov	#0, r10

#ifdef CONFIG_UNCACHED_MAPPING
	/*
	 * Uncached mapping
	 */
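	/*
	 * A single 16MB entry marked unbuffered (PMB_UB): its PPN is
	 * __MEMORY_START and its VPN sits right after the cached
	 * mapping, i.e. PAGE_OFFSET + __MEMORY_SIZE (r2 + r7).
	 */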
	mov	#(PMB_SZ_16M >> 2), r9
	shll2	r9

	mov	#(PMB_UB >> 8), r8
	shll8	r8

	or	r0, r8
	or	r9, r8
	mov.l	r8, @r1
	mov	r2, r8
	add	r7, r8
	mov.l	r8, @r3

	add	r4, r1
	add	r4, r3
	add	#1, r10
#endif

	/*
	 * Iterate over all of the available sizes from largest to
	 * smallest for constructing the cached mapping.
	 */
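	/*
	 * __PMB_ITER_BY_SIZE(n) loops for as long as at least n MB of
	 * RAM remain unmapped (r6 <= r5), programming one cached n MB
	 * entry per pass and advancing the PPN (r0), VPN (r2) and the
	 * remaining size (r5) before falling through to the next
	 * smaller size.
	 */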
#define __PMB_ITER_BY_SIZE(size)			\
.L##size:						\
	mov	#(size >> 4), r6;			\
	shll16	r6;					\
	shll8	r6;					\
							\
	cmp/hi	r5, r6;					\
	bt	9999f;					\
							\
	mov	#(PMB_SZ_##size##M >> 2), r9;		\
	shll2	r9;					\
							\
	/*						\
	 * Cached mapping				\
	 */						\
	mov	#PMB_C, r8;				\
	or	r0, r8;					\
	or	r9, r8;					\
	mov.l	r8, @r1;				\
	mov.l	r2, @r3;				\
							\
	/* Increment to the next PMB_DATA entry */	\
	add	r4, r1;					\
	/* Increment to the next PMB_ADDR entry */	\
	add	r4, r3;					\
	/* Increment number of PMB entries */		\
	add	#1, r10;				\
							\
	sub	r6, r5;					\
	add	r6, r0;					\
	add	r6, r2;					\
							\
	bra	.L##size;				\
9999:

	__PMB_ITER_BY_SIZE(512)
	__PMB_ITER_BY_SIZE(128)
	__PMB_ITER_BY_SIZE(64)
	__PMB_ITER_BY_SIZE(16)

#ifdef CONFIG_UNCACHED_MAPPING
	/*
	 * Now that we can access it, update cached_to_uncached and
	 * uncached_size.
	 */
	mov.l	.Lcached_to_uncached, r0
	mov.l	r7, @r0

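	/*
	 * Record the size of the uncached window: a single 16MB entry,
	 * i.e. 1 << 24, built here with shifts.
	 */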
	mov.l	.Luncached_size, r0
	mov	#1, r7
	shll16	r7
	shll8	r7
	mov.l	r7, @r0
#endif

	/*
	 * Clear the remaining PMB entries.
	 *
	 * r3 = entry to begin clearing from
	 * r10 = number of entries we've setup so far
	 */
	mov	#0, r1
	mov	#NR_PMB_ENTRIES, r0

.Lagain:
	mov.l	r1, @r3		/* Clear PMB_ADDR entry */
	add	#1, r10		/* Increment the loop counter */
	cmp/eq	r0, r10
	bf/s	.Lagain
	add	r4, r3		/* Increment to the next PMB_ADDR entry */

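	/*
	 * Invalidate the instruction cache block holding the cpu_init
	 * entry point (6f) before we branch to it further down.
	 */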
	mov.l	6f, r0
	icbi	@r0

.Lpmb_done:
#endif /* CONFIG_PMB */

#ifndef CONFIG_SH_NO_BSS_INIT
	/*
	 * Don't clear BSS if running on slow platforms such as an RTL simulation,
	 * remote memory via SHdebug link, etc.  For these the memory can be guaranteed
	 * to be all zero on boot anyway.
	 */
	! Clear BSS area
#ifdef CONFIG_SMP
	mov.l	3f, r0
	cmp/eq	#0, r0		! skip clear if set to zero
	bt	10f
#endif

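	/*
	 * Store zeros downwards from _end; the delay slot runs one final
	 * time on loop exit, which the "add #4" to __bss_start accounts
	 * for, so exactly [__bss_start, _end) ends up cleared.
	 */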
	mov.l	3f, r1
	add	#4, r1
	mov.l	4f, r2
	mov	#0, r0
9:	cmp/hs	r2, r1
	bf/s	9b		! while (r1 < r2)
	mov.l	r0,@-r2

10:
#endif

	!			Additional CPU initialization
	mov.l	6f, r0
	jsr	@r0
	nop

	SYNCO()			! Wait for pending instructions..

	!			Start kernel
	mov.l	5f, r0
	jmp	@r0
	nop

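	! Constant pool for the PC-relative mov.l loads above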
	.balign 4
#if defined(CONFIG_CPU_SH2)
1:	.long	0x000000F0		! IMASK=0xF
#else
1:	.long	0x500080F0		! MD=1, RB=0, BL=1, FD=1, IMASK=0xF
#endif
ENTRY(stack_start)
2:	.long	init_thread_union+THREAD_SIZE
3:	.long	__bss_start
4:	.long	_end
5:	.long	start_kernel
6:	.long	cpu_init
7:	.long	init_thread_union

#ifdef CONFIG_PMB
.LPMB_ADDR:		.long	PMB_ADDR
.LPMB_DATA:		.long	PMB_DATA
.LPMB_DATA_MASK:	.long	PMB_PFN_MASK | PMB_V
.LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
.LFIRST_DATA_ENTRY:	.long	__MEMORY_START | PMB_V
.LMMUCR:		.long	MMUCR
.LMEMORY_SIZE:		.long	__MEMORY_SIZE
#ifdef CONFIG_UNCACHED_MAPPING
.Lcached_to_uncached:	.long	cached_to_uncached
.Luncached_size:	.long	uncached_size
#endif
#endif