| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $ | 
|  | 2 | * ultra.S: Don't expand these all over the place... | 
|  | 3 | * | 
|  | 4 | * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com) | 
|  | 5 | */ | 
|  | 6 |  | 
|  | 7 | #include <linux/config.h> | 
|  | 8 | #include <asm/asi.h> | 
|  | 9 | #include <asm/pgtable.h> | 
|  | 10 | #include <asm/page.h> | 
|  | 11 | #include <asm/spitfire.h> | 
|  | 12 | #include <asm/mmu_context.h> | 
|  | 13 | #include <asm/pil.h> | 
|  | 14 | #include <asm/head.h> | 
|  | 15 | #include <asm/thread_info.h> | 
|  | 16 | #include <asm/cacheflush.h> | 
|  | 17 |  | 
|  | 18 | /* Basically, most of the Spitfire vs. Cheetah madness | 
|  | 19 | * has to do with the fact that Cheetah does not support | 
|  | 20 | * IMMU flushes out of the secondary context.  Someone needs | 
|  | 21 | * to throw a south lake birthday party for the folks | 
|  | 22 | * in Microelectronics who refused to fix this shit. | 
|  | 23 | */ | 
|  | 24 |  | 
/* This file is meant to be read efficiently by the CPU, not humans.
 * Take care not to break this for anyone...
 */
	.text
	.align		32
	.globl		__flush_tlb_mm
__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
	/* Flush every TLB entry belonging to one user context (Spitfire
	 * version; overwritten at boot with __cheetah_flush_tlb_mm on
	 * Cheetah cpus, see cheetah_patch_cachetlbops).  Fast path: if
	 * the secondary context register still holds the target context,
	 * issue demap-context operations directly.
	 */
	ldxa		[%o1] ASI_DMMU, %g2	! current secondary context
	cmp		%g2, %o0
	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
	mov		0x50, %g3		! delay slot: demap-context op, secondary ctx
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	retl
	flush		%g6			! delay slot: synchronize the demaps
	nop					! padding: leaves room for the
	nop					! 15-insn Cheetah replacement
	nop					! patched in at boot time
	nop
	nop
	nop
	nop
	nop
|  | 48 |  | 
	.align		32
	.globl		__flush_tlb_pending
__flush_tlb_pending:
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	/* Flush 'nr' pending page translations for 'context' (Spitfire
	 * version; replaced at boot by the 22-insn
	 * __cheetah_flush_tlb_pending on Cheetah cpus).  Each vaddrs[]
	 * entry is a page-aligned virtual address; bit 0 set means the
	 * I-TLB must be flushed for that page as well.
	 */
	rdpr		%pstate, %g7
	sllx		%o1, 3, %o1		! nr *= sizeof(u64)
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, %pstate		! disable interrupts
	mov		SECONDARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2	! save current secondary ctx
	stxa		%o0, [%o4] ASI_DMMU	! install target context
1:	sub		%o1, (1 << 3), %o1
	ldx		[%o2 + %o1], %o3	! next vaddr (walk backwards)
	andcc		%o3, 1, %g0		! bit 0: executable mapping?
	andn		%o3, 1, %o3		! strip the flag bit
	be,pn		%icc, 2f
	or		%o3, 0x10, %o3		! delay slot: demap-page, secondary ctx
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%o1, 1b
	nop
	stxa		%g2, [%o4] ASI_DMMU	! restore secondary ctx
	flush		%g6
	retl
	wrpr		%g7, 0x0, %pstate	! delay slot: restore interrupt state
|  | 75 |  | 
	.align		32
	.globl		__flush_tlb_kernel_range
__flush_tlb_kernel_range:	/* %o0=start, %o1=end */
	/* Demap every page in [start, end) from both TLBs in the
	 * nucleus (kernel) context.  Assumes start/end are page
	 * aligned; an empty range is a no-op.
	 */
	cmp		%o0, %o1
	be,pn		%xcc, 2f		! empty range: nothing to do
	sethi		%hi(PAGE_SIZE), %o4	! delay slot
	sub		%o1, %o0, %o3
	sub		%o3, %o4, %o3		! %o3 = offset of last page
	or		%o0, 0x20, %o0		! Nucleus
1:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%o3, 1b			! loop down to offset 0
	sub		%o3, %o4, %o3		! delay slot: step one page back
2:	retl
	flush		%g6			! delay slot
|  | 92 |  | 
__spitfire_flush_tlb_mm_slow:
	/* Slow path for __flush_tlb_mm: the secondary context register
	 * does not currently hold the target context, so temporarily
	 * install it with interrupts disabled, demap, then restore.
	 * In (set up by caller): %o0 = ctx, %o1 = SECONDARY_CONTEXT,
	 * %g2 = saved old context, %g3 = 0x50 (demap-context op,
	 * secondary ctx).
	 */
	rdpr		%pstate, %g1
	wrpr		%g1, PSTATE_IE, %pstate	! toggle IE off (wrpr XORs operands)
	stxa		%o0, [%o1] ASI_DMMU	! install target context
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	flush		%g6
	stxa		%g2, [%o1] ASI_DMMU	! restore previous context
	flush		%g6
	retl
	wrpr		%g1, 0, %pstate		! delay slot: restore interrupt state
|  | 104 |  | 
/*
 * The following code flushes one page_size worth.
 */
#if (PAGE_SHIFT == 13)
#define ITAG_MASK 0xfe
#elif (PAGE_SHIFT == 16)
#define ITAG_MASK 0x7fe
#else
#error unsupported PAGE_SIZE
#endif
	.align		32
	.globl		__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	/* Flush one page out of the I-cache: convert the physical
	 * address to its kernel (PAGE_OFFSET) virtual alias and run
	 * the whole page through 'flush' in 32-byte steps.
	 */
	membar		#StoreStore		! order prior stores first
	srlx		%o0, PAGE_SHIFT, %o0
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%o0, PAGE_SHIFT, %o0	! round down to page start
	sethi		%hi(PAGE_SIZE), %g2	! %g2 = loop counter (bytes)
	sllx		%g1, 32, %g1		! %g1 = PAGE_OFFSET
	add		%o0, %g1, %o0		! phys -> kernel vaddr
1:	subcc		%g2, 32, %g2
	bne,pt		%icc, 1b
	flush		%o0 + %g2		! delay slot: flush one line
	retl
	nop
|  | 130 |  | 
#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

	.align		64
	.globl		__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	/* Flush one page from the D-cache by scanning every cache tag
	 * and invalidating the lines whose tag matches this page.
	 * Replaced at boot by flush_dcpage_cheetah on Cheetah cpus.
	 * Tags are read four per iteration to overlap the loads with
	 * the compares (the !-comments record the intended issue
	 * slot/group per instruction).  On a match, the annulled
	 * delay-slot 'sub' rewinds %o4 to the matching line and we
	 * branch to the corresponding dflushN invalidation stub.
	 * If %o1 is non-zero, tail-call __flush_icache_page at the end,
	 * since the I-cache does not snoop local stores.
	 */
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%g1, 32, %g1
	sub		%o0, %g1, %o0		! kaddr -> physical address
	clr		%o4			! %o4 = tag offset into D-cache
	srlx		%o0, 11, %o0		! %o0 = tag comparitor value
	sethi		%hi(1 << 14), %o2	! end of tag space (16K D-cache)
1:	ldxa		[%o4] ASI_DCACHE_TAG, %o3	! LSU	Group
	add		%o4, (1 << 5), %o4		! IEU0
	ldxa		[%o4] ASI_DCACHE_TAG, %g1	! LSU	Group
	add		%o4, (1 << 5), %o4		! IEU0
	ldxa		[%o4] ASI_DCACHE_TAG, %g2	! LSU	Group	o3 available
	add		%o4, (1 << 5), %o4		! IEU0
	andn		%o3, DTAG_MASK, %o3		! IEU1
	ldxa		[%o4] ASI_DCACHE_TAG, %g3	! LSU	Group
	add		%o4, (1 << 5), %o4		! IEU0
	andn		%g1, DTAG_MASK, %g1		! IEU1
	cmp		%o0, %o3			! IEU1	Group
	be,a,pn		%xcc, dflush1			! CTI
	sub		%o4, (4 << 5), %o4		! IEU0	(Group)
	cmp		%o0, %g1			! IEU1	Group
	andn		%g2, DTAG_MASK, %g2		! IEU0
	be,a,pn		%xcc, dflush2			! CTI
	sub		%o4, (3 << 5), %o4		! IEU0	(Group)
	cmp		%o0, %g2			! IEU1	Group
	andn		%g3, DTAG_MASK, %g3		! IEU0
	be,a,pn		%xcc, dflush3			! CTI
	sub		%o4, (2 << 5), %o4		! IEU0	(Group)
	cmp		%o0, %g3			! IEU1	Group
	be,a,pn		%xcc, dflush4			! CTI
	sub		%o4, (1 << 5), %o4		! IEU0
2:	cmp		%o4, %o2			! IEU1	Group
	bne,pt		%xcc, 1b			! CTI
	nop						! IEU0

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt		%o1, __flush_icache_page
	sllx		%o0, 11, %o0		! delay slot: tag -> phys addr arg
	retl
	nop

	/* Invalidation stubs: dflushN invalidates the matching line
	 * and falls through the later stubs (re-invalidating the
	 * already-scanned neighbours), then rejoins the scan loop.
	 */
dflush1:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
dflush2:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
dflush3:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
dflush4:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
	membar		#Sync
	ba,pt		%xcc, 2b		! rejoin the scan loop
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */
|  | 196 |  | 
	.align		32
__prefill_dtlb:
	/* Pre-load one D-TLB entry.  In: %o5 = TLB tag (page vaddr |
	 * context), %o2 = pte.  Interrupts are disabled around the
	 * tag-access / data-in store pair so they cannot be split.
	 */
	rdpr		%pstate, %g7
	wrpr		%g7, PSTATE_IE, %pstate	! toggle IE off (wrpr XORs operands)
	mov		TLB_TAG_ACCESS, %g1
	stxa		%o5, [%g1] ASI_DMMU	! set the TLB tag
	stxa		%o2, [%g0] ASI_DTLB_DATA_IN	! insert the pte
	flush		%g6
	retl
	wrpr		%g7, %pstate		! delay slot: restore interrupt state
__prefill_itlb:
	/* Same as __prefill_dtlb, but for the I-TLB. */
	rdpr		%pstate, %g7
	wrpr		%g7, PSTATE_IE, %pstate
	mov		TLB_TAG_ACCESS, %g1
	stxa		%o5, [%g1] ASI_IMMU
	stxa		%o2, [%g0] ASI_ITLB_DATA_IN
	flush		%g6
	retl
	wrpr		%g7, %pstate
|  | 216 |  | 
	.globl		__update_mmu_cache
__update_mmu_cache:	/* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
	/* After software fault handling, pre-load the new translation
	 * into the appropriate TLB.  Builds the tag value
	 * (page-aligned vaddr | hw_context) in %o5 and dispatches on
	 * whether the fault was a D-TLB or I-TLB miss.
	 */
	srlx		%o1, PAGE_SHIFT, %o1
	andcc		%o3, FAULT_CODE_DTLB, %g0
	sllx		%o1, PAGE_SHIFT, %o5	! %o5 = vaddr & PAGE_MASK
	bne,pt		%xcc, __prefill_dtlb
	or		%o5, %o0, %o5		! delay slot: fold in the context
	ba,a,pt		%xcc, __prefill_itlb
|  | 225 |  | 
/* Cheetah specific versions, patched at boot time.
 *
 * The writes to the PRIMARY_CONTEXT register in this file are
 * safe even on Cheetah+ and later wrt. the page size fields.
 * The nucleus page size fields do not matter because we make
 * no data references, and these instructions execute out of a
 * locked I-TLB entry sitting in the fully associative I-TLB.
 * This sequence should also never trap.
 */
__cheetah_flush_tlb_mm: /* 15 insns */
	/* Cheetah cannot demap out of the secondary context, so run at
	 * trap level 1 with interrupts off and temporarily install the
	 * target context in PRIMARY_CONTEXT instead.  Must remain
	 * exactly 15 instructions: cheetah_patch_cachetlbops copies it
	 * over __flush_tlb_mm at boot.
	 */
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate	! disable interrupts
	wrpr		%g0, 1, %tl		! trap level 1
	mov		PRIMARY_CONTEXT, %o2
	mov		0x40, %g3		! demap-context op, primary ctx
	ldxa		[%o2] ASI_DMMU, %g2	! save current primary ctx
	stxa		%o0, [%o2] ASI_DMMU	! install target context
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	stxa		%g2, [%o2] ASI_DMMU	! restore primary ctx
	flush		%g6
	wrpr		%g0, 0, %tl		! back to trap level 0
	retl
	wrpr		%g7, 0x0, %pstate	! delay slot: restore interrupts
|  | 251 |  | 
__cheetah_flush_tlb_pending:	/* 22 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	/* Cheetah version of __flush_tlb_pending: run at trap level 1
	 * with interrupts disabled and demap through PRIMARY_CONTEXT
	 * (Cheetah cannot demap the secondary context).  Must remain
	 * exactly 22 instructions: cheetah_patch_cachetlbops copies it
	 * over the generic routine at boot.
	 */
	rdpr		%pstate, %g7
	sllx		%o1, 3, %o1		! nr *= sizeof(u64)
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate	! disable interrupts
	wrpr		%g0, 1, %tl		! trap level 1
	mov		PRIMARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2	! save current primary ctx
	stxa		%o0, [%o4] ASI_DMMU	! install target context
1:	sub		%o1, (1 << 3), %o1
	ldx		[%o2 + %o1], %o3	! next vaddr (walk backwards)
	andcc		%o3, 1, %g0		! bit 0: executable mapping?
	be,pn		%icc, 2f
	andn		%o3, 1, %o3		! delay slot: strip the flag bit
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	brnz,pt		%o1, 1b
	membar		#Sync			! delay slot
	stxa		%g2, [%o4] ASI_DMMU	! restore primary ctx
	flush		%g6
	wrpr		%g0, 0, %tl		! back to trap level 0
	retl
	wrpr		%g7, 0x0, %pstate	! delay slot: restore interrupts
|  | 276 |  | 
#ifdef DCACHE_ALIASING_POSSIBLE
flush_dcpage_cheetah: /* 11 insns */
	/* Cheetah D-cache page flush: invalidate each 32-byte line of
	 * the page through ASI_DCACHE_INVALIDATE using its physical
	 * address.  Must remain exactly 11 instructions (patched over
	 * __flush_dcache_page at boot).  In: %o0 = kernel vaddr.
	 */
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%g1, 32, %g1
	sub		%o0, %g1, %o0		! kaddr -> physical address
	sethi		%hi(PAGE_SIZE), %o4
1:	subcc		%o4, (1 << 5), %o4
	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	nop
	retl		/* I-cache flush never needed on Cheetah, see callers. */
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */
|  | 291 |  | 
cheetah_patch_one:
	/* Copy %o2 instruction words from %o1 (replacement code) over
	 * %o0 (routine being patched), issuing 'flush' on each patched
	 * word so the cpu fetches the new instructions.
	 */
1:	lduw		[%o1], %g1
	stw		%g1, [%o0]
	flush		%o0			! make the patched word visible
	subcc		%o2, 1, %o2
	add		%o1, 4, %o1
	bne,pt		%icc, 1b
	add		%o0, 4, %o0		! delay slot: next destination word
	retl
	nop
|  | 302 |  | 
	.globl		cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	/* Boot-time patching on Cheetah cpus: copy the Cheetah flush
	 * routines over their generic (Spitfire) counterparts.  The
	 * instruction counts passed in %o2 must match the "N insns"
	 * annotations on the replacement routines above.
	 */
	save		%sp, -128, %sp

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__cheetah_flush_tlb_mm), %o1
	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call		cheetah_patch_one
	mov		15, %o2			! delay slot: insn count

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__cheetah_flush_tlb_pending), %o1
	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call		cheetah_patch_one
	mov		22, %o2			! delay slot: insn count

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(flush_dcpage_cheetah), %o1
	or		%o1, %lo(flush_dcpage_cheetah), %o1
	call		cheetah_patch_one
	mov		11, %o2			! delay slot: insn count
#endif /* DCACHE_ALIASING_POSSIBLE */

	ret
	restore
|  | 332 |  | 
#ifdef CONFIG_SMP
/* These are all called by the slaves of a cross call, at
 * trap level 1, with interrupts fully disabled.
 *
 * Register usage:
 *   %g5	mm->context	(all tlb flushes)
 *   %g1	address arg 1	(tlb page and range flushes)
 *   %g7	address arg 2	(tlb range flush only)
 *
 *   %g6	ivector table, don't touch
 *   %g2	scratch 1
 *   %g3	scratch 2
 *   %g4	scratch 3
 *
 * TODO: Make xcall TLB range flushes use the tricks above... -DaveM
 */
	.align		32
	.globl		xcall_flush_tlb_mm
xcall_flush_tlb_mm:
	/* Cross-call slave: flush all TLB entries for context %g5,
	 * demapping through PRIMARY_CONTEXT.  'retry' resumes the
	 * interrupted instruction stream.
	 */
	mov		PRIMARY_CONTEXT, %g2
	mov		0x40, %g4		! demap-context op, primary ctx
	ldxa		[%g2] ASI_DMMU, %g3	! save current primary ctx
	stxa		%g5, [%g2] ASI_DMMU	! install target context
	stxa		%g0, [%g4] ASI_DMMU_DEMAP
	stxa		%g0, [%g4] ASI_IMMU_DEMAP
	stxa		%g3, [%g2] ASI_DMMU	! restore primary ctx
	retry
|  | 360 |  | 
	.globl		xcall_flush_tlb_pending
xcall_flush_tlb_pending:
	/* %g5=context, %g1=nr, %g7=vaddrs[] */
	/* Cross-call slave version of __flush_tlb_pending, demapping
	 * through PRIMARY_CONTEXT.  Note %g5 is reused as vaddr
	 * scratch once the target context has been installed.
	 */
	sllx		%g1, 3, %g1		! nr *= sizeof(u64)
	mov		PRIMARY_CONTEXT, %g4
	ldxa		[%g4] ASI_DMMU, %g2	! save current primary ctx
	stxa		%g5, [%g4] ASI_DMMU	! install target context
1:	sub		%g1, (1 << 3), %g1
	ldx		[%g7 + %g1], %g5	! next vaddr (walk backwards)
	andcc		%g5, 0x1, %g0		! bit 0: executable mapping?
	be,pn		%icc, 2f

	andn		%g5, 0x1, %g5		! delay slot: strip the flag bit
	stxa		%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%g1, 1b
	nop
	stxa		%g2, [%g4] ASI_DMMU	! restore primary ctx
	retry
|  | 381 |  | 
	.globl		xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:
	/* Cross-call slave: demap kernel (nucleus) TLB entries for the
	 * page range given by %g1 (start) and %g7 (end); both are
	 * rounded down to page boundaries before the loop.
	 */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1		! round start down to a page
	andn		%g7, %g2, %g7		! round end down to a page
	sub		%g7, %g1, %g3
	add		%g2, 1, %g2		! %g2 = PAGE_SIZE
	sub		%g3, %g2, %g3		! %g3 = offset of last page
	or		%g1, 0x20, %g1		! Nucleus
1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%g3, 1b			! loop down to offset 0
	sub		%g3, %g2, %g3		! delay slot: step one page back
	retry
	nop
	nop
|  | 400 |  | 
/* This runs in a very controlled environment, so we do
 * not need to worry about BH races etc.
 */
	.globl		xcall_sync_tick
xcall_sync_tick:
	/* Cross-call slave for tick synchronization: leave the
	 * interrupt/alternate globals, raise PIL, build a trap frame
	 * via etrap_irq, call the C handler, and return through
	 * rtrap_xcall.
	 */
	rdpr		%pstate, %g2
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil		! block all maskable interrupts
	sethi		%hi(109f), %g7
	b,pt		%xcc, etrap_irq
109:	 or		%g7, %lo(109b), %g7	! delay slot: etrap return point
	call		smp_synchronize_tick_client
	nop
	clr		%l6
	b		rtrap_xcall
	ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
|  | 418 |  | 
/* NOTE: This is SPECIAL!!  We do etrap/rtrap however
 *       we choose to deal with the "BH's run with
 *       %pil==15" problem (described in asm/pil.h)
 *       by just invoking rtrap directly past where
 *       BH's are checked for.
 *
 *       We do it like this because we do not want %pil==15
 *       lockups to prevent regs being reported.
 */
	.globl		xcall_report_regs
xcall_report_regs:
	/* Cross-call slave: build a trap frame via etrap_irq, dump this
	 * cpu's registers with __show_regs, then return via rtrap_xcall.
	 */
	rdpr		%pstate, %g2
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil		! block all maskable interrupts
	sethi		%hi(109f), %g7
	b,pt		%xcc, etrap_irq
109:	 or		%g7, %lo(109b), %g7	! delay slot: etrap return point
	call		__show_regs
	add		%sp, PTREGS_OFF, %o0	! delay slot: pt_regs argument
	clr		%l6
	/* Has to be a non-v9 branch due to the large distance. */
	b		rtrap_xcall
	ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
|  | 443 |  | 
#ifdef DCACHE_ALIASING_POSSIBLE
	.align		32
	.globl		xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	/* Cross-call slave: invalidate one page from the D-cache,
	 * one 32-byte line at a time, via ASI_DCACHE_INVALIDATE.
	 */
	sethi		%hi(PAGE_SIZE), %g3
1:	subcc		%g3, (1 << 5), %g3
	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */
|  | 457 |  | 
	.globl		xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
	/* Cross-call slave: scan every D-cache tag and invalidate the
	 * lines belonging to this physical page; then, if the page has
	 * a mapping (%g5 != 0), flush the page from the I-cache too
	 * via the 'flush' instruction on its kernel virtual address.
	 */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx		%g1, (13 - 2), %g1	! Form tag comparitor
	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
	andcc		%g2, 0x3, %g0		! state bits clear -> line unused
	be,pn		%xcc, 2f
	andn		%g2, 0x3, %g2		! delay slot: strip state bits
	cmp		%g2, %g1		! tag match for our page?

	bne,pt		%xcc, 2f
	nop
	stxa		%g0, [%g3] ASI_DCACHE_TAG	! invalidate the line
	membar		#Sync
2:	cmp		%g3, 0
	bne,pt		%xcc, 1b		! scan down to offset 0
	sub		%g3, (1 << 5), %g3	! delay slot: previous line

	brz,pn		%g5, 2f			! no mapping: skip I-cache flush
#endif /* DCACHE_ALIASING_POSSIBLE */
	sethi		%hi(PAGE_SIZE), %g3

1:	flush		%g7			! flush one I-cache line
	subcc		%g3, (1 << 5), %g3
	bne,pt		%icc, 1b
	add		%g7, (1 << 5), %g7	! delay slot: next line

2:	retry
	nop
	nop
|  | 492 |  | 
	.globl		xcall_promstop
xcall_promstop:
	/* Cross-call slave: build a trap frame and ask the PROM to stop
	 * this cpu via prom_stopself.  Should never return; spin if it
	 * somehow does.
	 */
	rdpr		%pstate, %g2
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil		! block all maskable interrupts
	sethi		%hi(109f), %g7
	b,pt		%xcc, etrap_irq
109:	 or		%g7, %lo(109b), %g7	! delay slot: etrap return point
	flushw					! spill all register windows
	call		prom_stopself
	nop
	/* We should not return, just spin if we do... */
1:	b,a,pt		%xcc, 1b
	nop
|  | 508 |  | 
	.data

	/* Dummy word that is stored to as part of the Spitfire errata
	 * #32 workaround (see xcall_flush_tlb_all_spitfire below). */
errata32_hwbug:
	.xword	0

	.text
|  | 515 |  | 
/* These two are not performance critical... */
	.globl		xcall_flush_tlb_all_spitfire
xcall_flush_tlb_all_spitfire:
	/* Cross-call slave: walk every D-TLB and I-TLB entry and clear
	 * its tag and data, skipping locked (_PAGE_L) entries.  The
	 * stores to errata32_hwbug before the TLB data accesses are
	 * the Spitfire errata #32 workaround.  Note the two '2:'
	 * labels: the first is the target of the locked-D-TLB skip,
	 * the second of the locked-I-TLB skip.
	 */
	/* Spitfire Errata #32 workaround. */
	sethi		%hi(errata32_hwbug), %g4
	stx		%g0, [%g4 + %lo(errata32_hwbug)]

	clr		%g2			! %g2 = TLB entry index
	clr		%g3			! %g3 = index << 3
1:	ldxa		[%g3] ASI_DTLB_DATA_ACCESS, %g4
	and		%g4, _PAGE_L, %g5
	brnz,pn		%g5, 2f			! locked entry: leave it alone
	mov		TLB_TAG_ACCESS, %g7	! delay slot

	stxa		%g0, [%g7] ASI_DMMU	! zero the tag
	membar		#Sync
	stxa		%g0, [%g3] ASI_DTLB_DATA_ACCESS	! zero the data
	membar		#Sync

	/* Spitfire Errata #32 workaround. */
	sethi		%hi(errata32_hwbug), %g4
	stx		%g0, [%g4 + %lo(errata32_hwbug)]

2:	ldxa		[%g3] ASI_ITLB_DATA_ACCESS, %g4
	and		%g4, _PAGE_L, %g5
	brnz,pn		%g5, 2f			! locked entry: leave it alone
	mov		TLB_TAG_ACCESS, %g7	! delay slot

	stxa		%g0, [%g7] ASI_IMMU	! zero the tag
	membar		#Sync
	stxa		%g0, [%g3] ASI_ITLB_DATA_ACCESS	! zero the data
	membar		#Sync

	/* Spitfire Errata #32 workaround. */
	sethi		%hi(errata32_hwbug), %g4
	stx		%g0, [%g4 + %lo(errata32_hwbug)]

2:	add		%g2, 1, %g2		! next TLB entry
	cmp		%g2, SPITFIRE_HIGHEST_LOCKED_TLBENT
	ble,pt		%icc, 1b
	sll		%g2, 3, %g3		! delay slot: next index offset
	flush		%g6
	retry
|  | 559 |  | 
	.globl		xcall_flush_tlb_all_cheetah
xcall_flush_tlb_all_cheetah:
	/* Cross-call slave: flush the entire TLB on each MMU with a
	 * single demap-all operation (address 0x80). */
	mov		0x80, %g2		! demap-all operation
	stxa		%g0, [%g2] ASI_DMMU_DEMAP
	stxa		%g0, [%g2] ASI_IMMU_DEMAP
	retry
|  | 566 |  | 
/* These just get rescheduled to PIL vectors. */
	.globl		xcall_call_function
xcall_call_function:
	/* Punt cross-cpu function-call requests to the
	 * PIL_SMP_CALL_FUNC software interrupt. */
	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl		xcall_receive_signal
xcall_receive_signal:
	/* Punt signal notification to its software interrupt. */
	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl		xcall_capture
xcall_capture:
	/* Punt cpu-capture requests to their software interrupt. */
	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

#endif /* CONFIG_SMP */