/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
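/*
 * Quick reference for the address math below (a sketch, assuming the
 * usual 64-bit hash-MMU layout of this era with 256M segments):
 *
 *	region = ea >> 60;	// top nibble: 0 = user, 0xc = linear
 *				// mapping, 0xd = vmalloc/io, 0xf = vmemmap
 *	esid   = ea >> 28;	// 256M effective segment id
 *
 * PAGE_OFFSET is 0xc000000000000000UL here, so comparing the region
 * nibble against 0xc separates kernel from user addresses.
 */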
_GLOBAL(slb_allocate_realmode)
	/* r3 = faulting address */

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the protoVSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */

	/* Check if hitting the linear mapping or some other kernel space */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_linear)
	li	r11,0
BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Check virtual memmap region. To be patched at kernel boot */
	cmpldi	cr0,r9,0xf
	bne	1f
_GLOBAL(slb_miss_kernel_load_vmemmap)
	li	r11,0
	b	6f
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	/* vmalloc mapping gets the encoding from the PACA as the mapping
	 * can be demoted from 64K -> 4K dynamically on some machines
	 */
	clrldi	r11,r10,48
	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
	bgt	5f
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
5:
	/* IO mapping */
_GLOBAL(slb_miss_kernel_load_io)
	li	r11,0
6:
BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

0:	/* user address: proto-VSID = context << USER_ESID_BITS | ESID.
	 * First check if the address is within the boundaries of the
	 * user region
	 */
	srdi.	r9,r10,USER_ESID_BITS
	bne-	8f			/* invalid ea bits set */


	/* when using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array. We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page size
	 */
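	/*
	 * Roughly, the slice lookup below does the following (a C sketch
	 * only; the field and constant names follow asm/paca.h and
	 * asm/page_64.h of this era and are assumed rather than guaranteed):
	 *
	 *	if (esid < 16) {		// first 4GB: 256MB slices
	 *		index  = esid;
	 *		psizes = get_paca()->context.low_slices_psize;
	 *	} else {			// above 4GB: 1TB slices
	 *		index  = esid >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT);
	 *		psizes = get_paca()->context.high_slices_psize;
	 *	}
	 *	psize = (psizes >> (index * 4)) & 0xf;	// 4 bits per slice
	 *	r11   = mmu_psize_defs[psize].sllp | SLB_VSID_USER;
	 */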
#ifdef CONFIG_PPC_MM_SLICES
	cmpldi	r10,16

	/* Get the slice index * 4 in r11 and matching slice size mask in r9 */
	ld	r9,PACALOWSLICESPSIZE(r13)
	sldi	r11,r10,2
	blt	5f
	ld	r9,PACAHIGHSLICEPSIZE(r13)
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2)
	andi.	r11,r11,0x3c

5:	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE

	/* Now get to the array and obtain the sllp */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */

	ld	r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
	cmpldi	r10,0x1000
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	rldimi	r10,r9,USER_ESID_BITS,0
BEGIN_FTR_SECTION
	bge	slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the pagetables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	beqlr

	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD. In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
	ld	r9,PACAPGDIR(r13)
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	cmpldi	cr0,r9,0
	beqlr

	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0

	/* fall through to slb_finish_load */

#endif /* __DISABLED__ */


/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
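/*
 * A note on the scramble: ASM_VSID_SCRAMBLE computes, approximately
 * (the multiplier and modulus constants live in asm/mmu-hash64.h of
 * this era and are assumed here, not restated):
 *
 *	vsid = (proto_vsid * VSID_MULTIPLIER) % VSID_MODULUS;
 *
 * where VSID_MODULUS is 2^36 - 1 for 256M segments. Because the
 * modulus is of the form 2^n - 1, the reduction is done with a
 * shift-and-add carry fold rather than a division. This is also what
 * maps the all-ones proto-VSID of the top kernel segment (see the
 * MAGIC warning above) onto the reserved bad VSID 0.
 */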
slb_finish_load:
	ASM_VSID_SCRAMBLE(r10,r9,256M)
	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
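	/*
	 * PACASTABRR is the per-cpu round-robin pointer: the SLB index
	 * the next miss will evict. It wraps back to SLB_NUM_BOLTED
	 * rather than to 0, so the bolted entries in the first slots
	 * (in this era: the linear mapping, vmalloc and the kernel
	 * stack segment) are never cast out. The "cmpldi r10,0" under
	 * slb_compare_rr_to_size below is patched at boot with the
	 * actual number of SLB entries on this cpu.
	 */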
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/*
	 * On iSeries, the "bolted" stack segment can be cast out on
	 * shared processor switch so we need to check for a miss on
	 * it and restore it to the right slot.
	 */
	ld	r9,PACAKSAVE(r13)
	clrrdi	r9,r9,28
	clrrdi	r3,r3,28
	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
	cmpld	r9,r3
	beq	3f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* This gets soft patched on boot. */
_GLOBAL(slb_compare_rr_to_size)
	cmpldi	r10,0

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)

3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r3 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
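	/*
	 * The cache records the ESIDs of user SLB entries inserted
	 * since the last context switch, so that on switch the kernel
	 * can invalidate just those entries instead of flushing the
	 * whole SLB (see switch_slb() in slb.c of this era). Writing
	 * an offset past SLB_CACHE_ENTRIES, as in the overflow case
	 * below, signals that a full flush is needed instead.
	 */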
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr

/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 * We assume legacy iSeries will never have 1T segments.
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
 */
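/*
 * For 1T segments the segment index is ea >> 40 (SID_SHIFT_1T) rather
 * than ea >> 28, so the 256M-style proto-VSID computed by the callers
 * is first shifted right by 40 - 28 = 12 bits below. After scrambling
 * and inserting the segment-size field into the VSID word, the code
 * rejoins the common round-robin insertion path at label 7 in
 * slb_finish_load above.
 */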
slb_finish_load_1T:
	srdi	r10,r10,40-28		/* get 1T ESID */
	ASM_VSID_SCRAMBLE(r10,r9,1T)
	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */

	/* r3 = EA, r11 = VSID data */
	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b