/*
 * arch/ia64/kvm/optvfault.S
 * optimized virtualization fault handlers
 *
 * Copyright (C) 2006 Intel Co
 *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 */

#include <asm/asmmacro.h>
#include <asm/processor.h>

#include "vti.h"
#include "asm-offsets.h"

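/*
 * These handlers emulate a privileged guest instruction without
 * leaving the VMM's fast path.  r25 holds the faulting instruction
 * image and r21 the vcpu pointer.  A general register operand is read
 * or written by branching into the asm_mov_from_reg/asm_mov_to_reg
 * tables below: each table entry is exactly one 16-byte bundle, so
 * the entry for GRn sits at table_base + n*16 (hence the shladd-by-4
 * address computations), and every entry returns through b0=r30.
 */
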
#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
#define ACCE_MOV_TO_RR
#define ACCE_RSM
#define ACCE_SSM
#define ACCE_MOV_TO_PSR
#define ACCE_THASH

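/*
 * Each ACCE_* switch above enables the corresponding accelerated
 * handler below; with the switch undefined, that handler reduces to a
 * branch back to kvm_virtualization_fault_back, i.e. the ordinary
 * (slow) virtualization fault path.
 */
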
//mov r1=ar3
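/*
 * Only the ar.itc case is handled here: the guest's ITC value is the
 * physical ar.itc plus a per-vcpu offset (the field behind
 * VMM_VCPU_ITC_OFS_OFFSET), and the result is also recorded at
 * VMM_VCPU_LAST_ITC_OFFSET before being delivered to the target
 * register through the asm_mov_to_reg table.
 */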
GLOBAL_ENTRY(kvm_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
	br.many kvm_virtualization_fault_back
#endif
	add r18=VMM_VCPU_ITC_OFS_OFFSET,r21
	add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
	extr.u r17=r25,6,7
	;;
	ld8 r18=[r18]
	mov r19=ar.itc
	mov r24=b0
	;;
	add r19=r19,r18
	addl r20=@gprel(asm_mov_to_reg),gp
	;;
	st8 [r16]=r19
	adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
	shladd r17=r17,4,r20
	;;
	mov b0=r17
	br.sptk.few b0
	;;
END(kvm_asm_mov_from_ar)


// mov r1=rr[r3]
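/*
 * Reads a guest region register: the region number is the top three
 * bits of the address taken from r3, and the value comes from the
 * vcpu's vrr[] shadow array (VMM_VCPU_VRR0_OFFSET) rather than from
 * the machine RRs.
 */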
GLOBAL_ENTRY(kvm_asm_mov_from_rr)
#ifndef ACCE_MOV_FROM_RR
	br.many kvm_virtualization_fault_back
#endif
	extr.u r16=r25,20,7
	extr.u r17=r25,6,7
	addl r20=@gprel(asm_mov_from_reg),gp
	;;
	adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
	shladd r16=r16,4,r20
	mov r24=b0
	;;
	add r27=VMM_VCPU_VRR0_OFFSET,r21
	mov b0=r16
	br.many b0
	;;
kvm_asm_mov_from_rr_back_1:
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
	adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
	shr.u r26=r19,61
	;;
	shladd r17=r17,4,r22
	shladd r27=r26,3,r27
	;;
	ld8 r19=[r27]
	mov b0=r17
	br.many b0
END(kvm_asm_mov_from_rr)


// mov rr[r3]=r2
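/*
 * Writes a guest region register.  The new value is stored into the
 * vcpu's vrr[] shadow first; writes to region 6 are punted back to
 * the slow path.  The machine RR value is then derived from the guest
 * one: the rid is remapped to (guest_rid << 4) | 0xe, ve is forced to
 * 1 and ps is clamped to 14 (16KB pages).  For regions 0 and 4 the
 * result is additionally kept in the metaphysical save slots, and the
 * hardware RR write is skipped while the vcpu runs in metaphysical
 * mode (mode_flags bit 0 set).
 */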
GLOBAL_ENTRY(kvm_asm_mov_to_rr)
#ifndef ACCE_MOV_TO_RR
	br.many kvm_virtualization_fault_back
#endif
	extr.u r16=r25,20,7
	extr.u r17=r25,13,7
	addl r20=@gprel(asm_mov_from_reg),gp
	;;
	adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
	shladd r16=r16,4,r20
	mov r22=b0
	;;
	add r27=VMM_VCPU_VRR0_OFFSET,r21
	mov b0=r16
	br.many b0
	;;
kvm_asm_mov_to_rr_back_1:
	adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
	shr.u r23=r19,61
	shladd r17=r17,4,r20
	;;
	//if rr6, go back
	cmp.eq p6,p0=6,r23
	mov b0=r22
	(p6) br.cond.dpnt.many kvm_virtualization_fault_back
	;;
	mov r28=r19
	mov b0=r17
	br.many b0
kvm_asm_mov_to_rr_back_2:
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
	shladd r27=r23,3,r27
	;; // vrr.rid<<4 |0xe
	st8 [r27]=r19
	mov b0=r30
	;;
	extr.u r16=r19,8,26
	extr.u r18=r19,2,6
	mov r17=0xe
	;;
	shladd r16=r16,4,r17
	extr.u r19=r19,0,8
	;;
	shl r16=r16,8
	;;
	add r19=r19,r16
	;; //set ve 1
	dep r19=-1,r19,0,1
	cmp.lt p6,p0=14,r18
	;;
	(p6) mov r18=14
	;;
	(p6) dep r19=r18,r19,2,6
	;;
	cmp.eq p6,p0=0,r23
	;;
	cmp.eq.or p6,p0=4,r23
	;;
	adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	(p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
	;;
	ld4 r16=[r16]
	cmp.eq p7,p0=r0,r0
	(p6) shladd r17=r23,1,r17
	;;
	(p6) st8 [r17]=r19
	(p6) tbit.nz p6,p7=r16,0
	;;
	(p7) mov rr[r28]=r19
	mov r24=r22
	br.many b0
END(kvm_asm_mov_to_rr)


//rsm
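/*
 * Emulates rsm imm24.  The immediate is reassembled from its three
 * encoding fields (instruction bits 6..26, 31..32 and 36) and the
 * selected bits are cleared in the virtual PSR; they are also cleared
 * in the machine cr.ipsr, except for ic/i/dt/si, which the host must
 * keep set.  If the guest clears psr.dt on a vcpu not yet in
 * metaphysical mode, rr0/rr4 are switched to the metaphysical RRs
 * (the fields behind VMM_VCPU_META_RR0_OFFSET) and mode_flags bit 0
 * is set.
 */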
GLOBAL_ENTRY(kvm_asm_rsm)
#ifndef ACCE_RSM
	br.many kvm_virtualization_fault_back
#endif
	add r16=VMM_VPD_BASE_OFFSET,r21
	extr.u r26=r25,6,21
	extr.u r27=r25,31,2
	;;
	ld8 r16=[r16]
	extr.u r28=r25,36,1
	dep r26=r27,r26,21,2
	;;
	add r17=VPD_VPSR_START_OFFSET,r16
	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	//r26 is imm24
	dep r26=r28,r26,23,1
	;;
	ld8 r18=[r17]
	movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
	ld4 r23=[r22]
	sub r27=-1,r26
	mov r24=b0
	;;
	mov r20=cr.ipsr
	or r28=r27,r28
	and r19=r18,r27
	;;
	st8 [r17]=r19
	and r20=r20,r28
	/* Disabled for now: fp lazy algorithm support is missing
	adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
	;;
	ld8 r27=[r27]
	;;
	tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
	;;
	(p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
	*/
	;;
	mov cr.ipsr=r20
	tbit.nz p6,p0=r23,0
	;;
	tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
	(p6) br.dptk kvm_resume_to_guest
	;;
	add r26=VMM_VCPU_META_RR0_OFFSET,r21
	add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
	dep r23=-1,r23,0,1
	;;
	ld8 r26=[r26]
	ld8 r27=[r27]
	st4 [r22]=r23
	dep.z r28=4,61,3
	;;
	mov rr[r0]=r26
	;;
	mov rr[r28]=r27
	;;
	srlz.d
	br.many kvm_resume_to_guest
END(kvm_asm_rsm)


//ssm
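/*
 * Emulates ssm imm24: the reassembled immediate is OR-ed into both
 * the virtual PSR and the machine cr.ipsr.  If dt/rt/it are now all
 * set on a vcpu that was running in metaphysical mode, the saved
 * rr0/rr4 are restored and mode_flags bit 0 is cleared.  Finally, if
 * vpsr.i has just been turned on and the pending interrupt (vhpi)
 * outranks the vtpr mask, the fault is redirected to
 * kvm_asm_dispatch_vexirq.
 */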
GLOBAL_ENTRY(kvm_asm_ssm)
#ifndef ACCE_SSM
	br.many kvm_virtualization_fault_back
#endif
	add r16=VMM_VPD_BASE_OFFSET,r21
	extr.u r26=r25,6,21
	extr.u r27=r25,31,2
	;;
	ld8 r16=[r16]
	extr.u r28=r25,36,1
	dep r26=r27,r26,21,2
	;;  //r26 is imm24
	add r27=VPD_VPSR_START_OFFSET,r16
	dep r26=r28,r26,23,1
	;;  //r19 vpsr
	ld8 r29=[r27]
	mov r24=b0
	;;
	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	mov r20=cr.ipsr
	or r19=r29,r26
	;;
	ld4 r23=[r22]
	st8 [r27]=r19
	or r20=r20,r26
	;;
	mov cr.ipsr=r20
	movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
	;;
	// mask into scratch r18: r19 must still hold the new vpsr
	// for the vpsr.i test at kvm_asm_ssm_1 below
	and r18=r28,r19
	tbit.z p6,p0=r23,0
	;;
	cmp.ne.or p6,p0=r28,r18
	(p6) br.dptk kvm_asm_ssm_1
	;;
	add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
	add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
	dep r23=0,r23,0,1
	;;
	ld8 r26=[r26]
	ld8 r27=[r27]
	st4 [r22]=r23
	dep.z r28=4,61,3
	;;
	mov rr[r0]=r26
	;;
	mov rr[r28]=r27
	;;
	srlz.d
	;;
kvm_asm_ssm_1:
	tbit.nz p6,p0=r29,IA64_PSR_I_BIT
	;;
	tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
	(p6) br.dptk kvm_resume_to_guest
	;;
	add r29=VPD_VTPR_START_OFFSET,r16
	add r30=VPD_VHPI_START_OFFSET,r16
	;;
	ld8 r29=[r29]
	ld8 r30=[r30]
	;;
	extr.u r17=r29,4,4
	extr.u r18=r29,16,1
	;;
	dep r17=r18,r17,4,1
	;;
	cmp.gt p6,p0=r30,r17
	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
	br.many kvm_resume_to_guest
END(kvm_asm_ssm)


//mov psr.l=r2
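/*
 * Emulates mov psr.l=r2: r2 is fetched through the asm_mov_from_reg
 * table and replaces the low 32 bits of the virtual PSR.  The machine
 * cr.ipsr takes the same low bits with ic/i/dt/si/rt forced on for
 * the host, and a dt/rt/it transition switches between metaphysical
 * and virtual mode as in kvm_asm_rsm/kvm_asm_ssm.  A newly enabled
 * vpsr.i with a pending, unmasked vhpi again goes to
 * kvm_asm_dispatch_vexirq.
 */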
GLOBAL_ENTRY(kvm_asm_mov_to_psr)
#ifndef ACCE_MOV_TO_PSR
	br.many kvm_virtualization_fault_back
#endif
	add r16=VMM_VPD_BASE_OFFSET,r21
	extr.u r26=r25,13,7	//r2
	;;
	ld8 r16=[r16]
	addl r20=@gprel(asm_mov_from_reg),gp
	;;
	adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
	shladd r26=r26,4,r20
	mov r24=b0
	;;
	add r27=VPD_VPSR_START_OFFSET,r16
	mov b0=r26
	br.many b0
	;;
kvm_asm_mov_to_psr_back:
	ld8 r17=[r27]
	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	dep r19=0,r19,32,32
	;;
	ld4 r23=[r22]
	dep r18=0,r17,0,32
	;;
	add r30=r18,r19
	movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
	;;
	st8 [r27]=r30
	and r27=r28,r30
	and r29=r28,r17
	;;
	cmp.eq p5,p0=r29,r27
	cmp.eq p6,p7=r28,r27
	(p5) br.many kvm_asm_mov_to_psr_1
	;;
	//virtual to physical
	(p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21
	(p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
	(p7) dep r23=-1,r23,0,1
	;;
	//physical to virtual
	(p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
	(p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
	(p6) dep r23=0,r23,0,1
	;;
	ld8 r26=[r26]
	ld8 r27=[r27]
	st4 [r22]=r23
	dep.z r28=4,61,3
	;;
	mov rr[r0]=r26
	;;
	mov rr[r28]=r27
	;;
	srlz.d
	;;
kvm_asm_mov_to_psr_1:
	mov r20=cr.ipsr
	movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
	;;
	or r19=r19,r28
	dep r20=0,r20,0,32
	;;
	add r20=r19,r20
	mov b0=r24
	;;
	/* Disabled for now: fp lazy algorithm support is missing
	adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
	;;
	ld8 r27=[r27]
	;;
	tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
	;;
	(p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
	;;
	*/
	mov cr.ipsr=r20
	cmp.ne p6,p0=r0,r0
	;;
	tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
	tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
	(p6) br.dpnt.few kvm_resume_to_guest
	;;
	add r29=VPD_VTPR_START_OFFSET,r16
	add r30=VPD_VHPI_START_OFFSET,r16
	;;
	ld8 r29=[r29]
	ld8 r30=[r30]
	;;
	extr.u r17=r29,4,4
	extr.u r18=r29,16,1
	;;
	dep r17=r18,r17,4,1
	;;
	cmp.gt p6,p0=r30,r17
	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
	br.many kvm_resume_to_guest
END(kvm_asm_mov_to_psr)


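/*
 * Injects a virtual external interrupt.  The guest IP is first
 * stepped past the emulated instruction (ipsr.ri walks the bundle
 * slots and wraps to iip+16 after slot 2), then control transfers to
 * the C-level kvm_dispatch_vexirq with r30 set to 1.
 */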
ENTRY(kvm_asm_dispatch_vexirq)
//increment iip
	mov r16=cr.ipsr
	;;
	extr.u r17=r16,IA64_PSR_RI_BIT,2
	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
	;;
	(p6) mov r18=cr.iip
	(p6) mov r17=r0
	(p7) add r17=1,r17
	;;
	(p6) add r18=0x10,r18
	dep r16=r17,r16,IA64_PSR_RI_BIT,2
	;;
	(p6) mov cr.iip=r18
	mov cr.ipsr=r16
	mov r30=1
	br.many kvm_dispatch_vexirq
END(kvm_asm_dispatch_vexirq)

// thash
// TODO: add support when pta.vf = 1
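/*
 * For pta.vf = 0 this computes the short-format VHPT hash of the
 * address in r3:
 *
 *	pval = (vaddr & VRN_MASK)
 *	     | (((pta << 3) >> (pta.size + 3)) << pta.size)
 *	     | (((vaddr >> rr.ps) << 3) & ((1UL << pta.size) - 1))
 *
 * where rr.ps comes from the guest's vrr[] entry for the region, and
 * delivers the result through the asm_mov_to_reg table.
 */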
GLOBAL_ENTRY(kvm_asm_thash)
#ifndef ACCE_THASH
	br.many kvm_virtualization_fault_back
#endif
	extr.u r17=r25,20,7		// get r3 from opcode in r25
	extr.u r18=r25,6,7		// get r1 from opcode in r25
	addl r20=@gprel(asm_mov_from_reg),gp
	;;
	adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
	shladd r17=r17,4,r20	// get addr of MOVE_FROM_REG(r17)
	adds r16=VMM_VPD_BASE_OFFSET,r21	// get vcpu.arch.privregs
	;;
	mov r24=b0
	;;
	ld8 r16=[r16]		// get VPD addr
	mov b0=r17
	br.many b0			// r19 return value
	;;
kvm_asm_thash_back1:
	shr.u r23=r19,61		// get RR number
	adds r25=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
	adds r16=VMM_VPD_VPTA_OFFSET,r16	// get vpta
	;;
	shladd r27=r23,3,r25	// get vcpu->arch.vrr[r23]'s addr
	ld8 r17=[r16]		// get PTA
	mov r26=1
	;;
	extr.u r29=r17,2,6		// get pta.size
	ld8 r25=[r27]		// get vcpu->arch.vrr[r23]'s value
	;;
	extr.u r25=r25,2,6		// get rr.ps
	shl r22=r26,r29		// 1UL << pta.size
	;;
	shr.u r23=r19,r25		// vaddr >> rr.ps
	adds r26=3,r29		// pta.size + 3
	shl r27=r17,3		// pta << 3
	;;
	shl r23=r23,3		// (vaddr >> rr.ps) << 3
	shr.u r27=r27,r26		// (pta << 3) >> (pta.size+3)
	movl r16=7<<61
	;;
	adds r22=-1,r22		// (1UL << pta.size) - 1
	shl r27=r27,r29		// ((pta<<3)>>(pta.size+3))<<pta.size
	and r19=r19,r16		// vaddr & VRN_MASK
	;;
	and r22=r22,r23		// vhpt_offset
	or r19=r19,r27 // (vaddr&VRN_MASK)|(((pta<<3)>>(pta.size + 3))<<pta.size)
	adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
	;;
	or r19=r19,r22		// calc pval
	shladd r17=r18,4,r26
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
	;;
	mov b0=r17
	br.many b0
END(kvm_asm_thash)

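/*
 * Register access tables.  Each MOV_TO_REG(n)/MOV_FROM_REG(n) entry
 * expands to exactly one 16-byte bundle, which is what allows callers
 * to compute an entry's address as table_base + n*16.  GR0 is
 * hardwired to zero, so its mov-to slot is bundle-sized padding.
 * The banked registers r16-r31 are not directly visible from these
 * bank 0 handlers, so their entries tail-jump to out-of-line
 * asm_mov_to/from_bank0_reg##n routines that flip banks (bsw.1/bsw.0)
 * around the access; the jump stubs preserve the one-bundle table
 * layout.
 */
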
#define MOV_TO_REG0	\
{;			\
	nop.b 0x0;		\
	nop.b 0x0;		\
	nop.b 0x0;		\
	;;			\
};


#define MOV_TO_REG(n)	\
{;			\
	mov r##n##=r19;	\
	mov b0=r30;	\
	br.sptk.many b0;	\
	;;			\
};


#define MOV_FROM_REG(n)	\
{;				\
	mov r19=r##n##;		\
	mov b0=r30;		\
	br.sptk.many b0;		\
	;;				\
};


#define MOV_TO_BANK0_REG(n)			\
ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##);	\
{;						\
	mov r26=r2;				\
	mov r2=r19;				\
	bsw.1;					\
	;;						\
};						\
{;						\
	mov r##n##=r2;				\
	nop.b 0x0;					\
	bsw.0;					\
	;;						\
};						\
{;						\
	mov r2=r26;				\
	mov b0=r30;				\
	br.sptk.many b0;				\
	;;						\
};						\
END(asm_mov_to_bank0_reg##n##)


#define MOV_FROM_BANK0_REG(n)			\
ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##);	\
{;						\
	mov r26=r2;				\
	nop.b 0x0;					\
	bsw.1;					\
	;;						\
};						\
{;						\
	mov r2=r##n##;				\
	nop.b 0x0;					\
	bsw.0;					\
	;;						\
};						\
{;						\
	mov r19=r2;				\
	mov r2=r26;				\
	mov b0=r30;				\
};						\
{;						\
	nop.b 0x0;					\
	nop.b 0x0;					\
	br.sptk.many b0;				\
	;;						\
};						\
END(asm_mov_from_bank0_reg##n##)


#define JMP_TO_MOV_TO_BANK0_REG(n)		\
{;						\
	nop.b 0x0;					\
	nop.b 0x0;					\
	br.sptk.many asm_mov_to_bank0_reg##n##;	\
	;;						\
}


#define JMP_TO_MOV_FROM_BANK0_REG(n)		\
{;						\
	nop.b 0x0;					\
	nop.b 0x0;					\
	br.sptk.many asm_mov_from_bank0_reg##n##;	\
	;;						\
}


MOV_FROM_BANK0_REG(16)
MOV_FROM_BANK0_REG(17)
MOV_FROM_BANK0_REG(18)
MOV_FROM_BANK0_REG(19)
MOV_FROM_BANK0_REG(20)
MOV_FROM_BANK0_REG(21)
MOV_FROM_BANK0_REG(22)
MOV_FROM_BANK0_REG(23)
MOV_FROM_BANK0_REG(24)
MOV_FROM_BANK0_REG(25)
MOV_FROM_BANK0_REG(26)
MOV_FROM_BANK0_REG(27)
MOV_FROM_BANK0_REG(28)
MOV_FROM_BANK0_REG(29)
MOV_FROM_BANK0_REG(30)
MOV_FROM_BANK0_REG(31)


// mov from reg table
ENTRY(asm_mov_from_reg)
	MOV_FROM_REG(0)
	MOV_FROM_REG(1)
	MOV_FROM_REG(2)
	MOV_FROM_REG(3)
	MOV_FROM_REG(4)
	MOV_FROM_REG(5)
	MOV_FROM_REG(6)
	MOV_FROM_REG(7)
	MOV_FROM_REG(8)
	MOV_FROM_REG(9)
	MOV_FROM_REG(10)
	MOV_FROM_REG(11)
	MOV_FROM_REG(12)
	MOV_FROM_REG(13)
	MOV_FROM_REG(14)
	MOV_FROM_REG(15)
	JMP_TO_MOV_FROM_BANK0_REG(16)
	JMP_TO_MOV_FROM_BANK0_REG(17)
	JMP_TO_MOV_FROM_BANK0_REG(18)
	JMP_TO_MOV_FROM_BANK0_REG(19)
	JMP_TO_MOV_FROM_BANK0_REG(20)
	JMP_TO_MOV_FROM_BANK0_REG(21)
	JMP_TO_MOV_FROM_BANK0_REG(22)
	JMP_TO_MOV_FROM_BANK0_REG(23)
	JMP_TO_MOV_FROM_BANK0_REG(24)
	JMP_TO_MOV_FROM_BANK0_REG(25)
	JMP_TO_MOV_FROM_BANK0_REG(26)
	JMP_TO_MOV_FROM_BANK0_REG(27)
	JMP_TO_MOV_FROM_BANK0_REG(28)
	JMP_TO_MOV_FROM_BANK0_REG(29)
	JMP_TO_MOV_FROM_BANK0_REG(30)
	JMP_TO_MOV_FROM_BANK0_REG(31)
	MOV_FROM_REG(32)
	MOV_FROM_REG(33)
	MOV_FROM_REG(34)
	MOV_FROM_REG(35)
	MOV_FROM_REG(36)
	MOV_FROM_REG(37)
	MOV_FROM_REG(38)
	MOV_FROM_REG(39)
	MOV_FROM_REG(40)
	MOV_FROM_REG(41)
	MOV_FROM_REG(42)
	MOV_FROM_REG(43)
	MOV_FROM_REG(44)
	MOV_FROM_REG(45)
	MOV_FROM_REG(46)
	MOV_FROM_REG(47)
	MOV_FROM_REG(48)
	MOV_FROM_REG(49)
	MOV_FROM_REG(50)
	MOV_FROM_REG(51)
	MOV_FROM_REG(52)
	MOV_FROM_REG(53)
	MOV_FROM_REG(54)
	MOV_FROM_REG(55)
	MOV_FROM_REG(56)
	MOV_FROM_REG(57)
	MOV_FROM_REG(58)
	MOV_FROM_REG(59)
	MOV_FROM_REG(60)
	MOV_FROM_REG(61)
	MOV_FROM_REG(62)
	MOV_FROM_REG(63)
	MOV_FROM_REG(64)
	MOV_FROM_REG(65)
	MOV_FROM_REG(66)
	MOV_FROM_REG(67)
	MOV_FROM_REG(68)
	MOV_FROM_REG(69)
	MOV_FROM_REG(70)
	MOV_FROM_REG(71)
	MOV_FROM_REG(72)
	MOV_FROM_REG(73)
	MOV_FROM_REG(74)
	MOV_FROM_REG(75)
	MOV_FROM_REG(76)
	MOV_FROM_REG(77)
	MOV_FROM_REG(78)
	MOV_FROM_REG(79)
	MOV_FROM_REG(80)
	MOV_FROM_REG(81)
	MOV_FROM_REG(82)
	MOV_FROM_REG(83)
	MOV_FROM_REG(84)
	MOV_FROM_REG(85)
	MOV_FROM_REG(86)
	MOV_FROM_REG(87)
	MOV_FROM_REG(88)
	MOV_FROM_REG(89)
	MOV_FROM_REG(90)
	MOV_FROM_REG(91)
	MOV_FROM_REG(92)
	MOV_FROM_REG(93)
	MOV_FROM_REG(94)
	MOV_FROM_REG(95)
	MOV_FROM_REG(96)
	MOV_FROM_REG(97)
	MOV_FROM_REG(98)
	MOV_FROM_REG(99)
	MOV_FROM_REG(100)
	MOV_FROM_REG(101)
	MOV_FROM_REG(102)
	MOV_FROM_REG(103)
	MOV_FROM_REG(104)
	MOV_FROM_REG(105)
	MOV_FROM_REG(106)
	MOV_FROM_REG(107)
	MOV_FROM_REG(108)
	MOV_FROM_REG(109)
	MOV_FROM_REG(110)
	MOV_FROM_REG(111)
	MOV_FROM_REG(112)
	MOV_FROM_REG(113)
	MOV_FROM_REG(114)
	MOV_FROM_REG(115)
	MOV_FROM_REG(116)
	MOV_FROM_REG(117)
	MOV_FROM_REG(118)
	MOV_FROM_REG(119)
	MOV_FROM_REG(120)
	MOV_FROM_REG(121)
	MOV_FROM_REG(122)
	MOV_FROM_REG(123)
	MOV_FROM_REG(124)
	MOV_FROM_REG(125)
	MOV_FROM_REG(126)
	MOV_FROM_REG(127)
END(asm_mov_from_reg)


/* must be in bank 0
 * parameters:
 * r31: pr
 * r24: b0
 */
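/*
 * The emulated instruction has already produced its result, so this
 * steps iip/ipsr.ri past it (the same slot walk as in
 * kvm_asm_dispatch_vexirq) and re-enters the guest through the PAL
 * VPS service: RESUME_NORMAL when vpsr.ic is set, RESUME_HANDLER
 * otherwise.
 */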
ENTRY(kvm_resume_to_guest)
	adds r16=VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	ld8 r1=[r16]
	adds r20=VMM_VCPU_VSA_BASE_OFFSET,r21
	;;
	mov r16=cr.ipsr
	;;
	ld8 r20=[r20]
	adds r19=VMM_VPD_BASE_OFFSET,r21
	;;
	ld8 r25=[r19]
	extr.u r17=r16,IA64_PSR_RI_BIT,2
	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
	;;
	(p6) mov r18=cr.iip
	(p6) mov r17=r0
	;;
	(p6) add r18=0x10,r18
	(p7) add r17=1,r17
	;;
	(p6) mov cr.iip=r18
	dep r16=r17,r16,IA64_PSR_RI_BIT,2
	;;
	mov cr.ipsr=r16
	adds r19=VPD_VPSR_START_OFFSET,r25
	add r28=PAL_VPS_RESUME_NORMAL,r20
	add r29=PAL_VPS_RESUME_HANDLER,r20
	;;
	ld8 r19=[r19]
	mov b0=r29
	cmp.ne p6,p7=r0,r0
	;;
	tbit.z p6,p7=r19,IA64_PSR_IC_BIT		// p7 <- vpsr.ic
	;;
	(p6) ld8 r26=[r25]
	(p7) mov b0=r28
	mov pr=r31,-2
	br.sptk.many b0             // call pal service
	;;
END(kvm_resume_to_guest)


MOV_TO_BANK0_REG(16)
MOV_TO_BANK0_REG(17)
MOV_TO_BANK0_REG(18)
MOV_TO_BANK0_REG(19)
MOV_TO_BANK0_REG(20)
MOV_TO_BANK0_REG(21)
MOV_TO_BANK0_REG(22)
MOV_TO_BANK0_REG(23)
MOV_TO_BANK0_REG(24)
MOV_TO_BANK0_REG(25)
MOV_TO_BANK0_REG(26)
MOV_TO_BANK0_REG(27)
MOV_TO_BANK0_REG(28)
MOV_TO_BANK0_REG(29)
MOV_TO_BANK0_REG(30)
MOV_TO_BANK0_REG(31)


// mov to reg table
ENTRY(asm_mov_to_reg)
	MOV_TO_REG0
	MOV_TO_REG(1)
	MOV_TO_REG(2)
	MOV_TO_REG(3)
	MOV_TO_REG(4)
	MOV_TO_REG(5)
	MOV_TO_REG(6)
	MOV_TO_REG(7)
	MOV_TO_REG(8)
	MOV_TO_REG(9)
	MOV_TO_REG(10)
	MOV_TO_REG(11)
	MOV_TO_REG(12)
	MOV_TO_REG(13)
	MOV_TO_REG(14)
	MOV_TO_REG(15)
	JMP_TO_MOV_TO_BANK0_REG(16)
	JMP_TO_MOV_TO_BANK0_REG(17)
	JMP_TO_MOV_TO_BANK0_REG(18)
	JMP_TO_MOV_TO_BANK0_REG(19)
	JMP_TO_MOV_TO_BANK0_REG(20)
	JMP_TO_MOV_TO_BANK0_REG(21)
	JMP_TO_MOV_TO_BANK0_REG(22)
	JMP_TO_MOV_TO_BANK0_REG(23)
	JMP_TO_MOV_TO_BANK0_REG(24)
	JMP_TO_MOV_TO_BANK0_REG(25)
	JMP_TO_MOV_TO_BANK0_REG(26)
	JMP_TO_MOV_TO_BANK0_REG(27)
	JMP_TO_MOV_TO_BANK0_REG(28)
	JMP_TO_MOV_TO_BANK0_REG(29)
	JMP_TO_MOV_TO_BANK0_REG(30)
	JMP_TO_MOV_TO_BANK0_REG(31)
	MOV_TO_REG(32)
	MOV_TO_REG(33)
	MOV_TO_REG(34)
	MOV_TO_REG(35)
	MOV_TO_REG(36)
	MOV_TO_REG(37)
	MOV_TO_REG(38)
	MOV_TO_REG(39)
	MOV_TO_REG(40)
	MOV_TO_REG(41)
	MOV_TO_REG(42)
	MOV_TO_REG(43)
	MOV_TO_REG(44)
	MOV_TO_REG(45)
	MOV_TO_REG(46)
	MOV_TO_REG(47)
	MOV_TO_REG(48)
	MOV_TO_REG(49)
	MOV_TO_REG(50)
	MOV_TO_REG(51)
	MOV_TO_REG(52)
	MOV_TO_REG(53)
	MOV_TO_REG(54)
	MOV_TO_REG(55)
	MOV_TO_REG(56)
	MOV_TO_REG(57)
	MOV_TO_REG(58)
	MOV_TO_REG(59)
	MOV_TO_REG(60)
	MOV_TO_REG(61)
	MOV_TO_REG(62)
	MOV_TO_REG(63)
	MOV_TO_REG(64)
	MOV_TO_REG(65)
	MOV_TO_REG(66)
	MOV_TO_REG(67)
	MOV_TO_REG(68)
	MOV_TO_REG(69)
	MOV_TO_REG(70)
	MOV_TO_REG(71)
	MOV_TO_REG(72)
	MOV_TO_REG(73)
	MOV_TO_REG(74)
	MOV_TO_REG(75)
	MOV_TO_REG(76)
	MOV_TO_REG(77)
	MOV_TO_REG(78)
	MOV_TO_REG(79)
	MOV_TO_REG(80)
	MOV_TO_REG(81)
	MOV_TO_REG(82)
	MOV_TO_REG(83)
	MOV_TO_REG(84)
	MOV_TO_REG(85)
	MOV_TO_REG(86)
	MOV_TO_REG(87)
	MOV_TO_REG(88)
	MOV_TO_REG(89)
	MOV_TO_REG(90)
	MOV_TO_REG(91)
	MOV_TO_REG(92)
	MOV_TO_REG(93)
	MOV_TO_REG(94)
	MOV_TO_REG(95)
	MOV_TO_REG(96)
	MOV_TO_REG(97)
	MOV_TO_REG(98)
	MOV_TO_REG(99)
	MOV_TO_REG(100)
	MOV_TO_REG(101)
	MOV_TO_REG(102)
	MOV_TO_REG(103)
	MOV_TO_REG(104)
	MOV_TO_REG(105)
	MOV_TO_REG(106)
	MOV_TO_REG(107)
	MOV_TO_REG(108)
	MOV_TO_REG(109)
	MOV_TO_REG(110)
	MOV_TO_REG(111)
	MOV_TO_REG(112)
	MOV_TO_REG(113)
	MOV_TO_REG(114)
	MOV_TO_REG(115)
	MOV_TO_REG(116)
	MOV_TO_REG(117)
	MOV_TO_REG(118)
	MOV_TO_REG(119)
	MOV_TO_REG(120)
	MOV_TO_REG(121)
	MOV_TO_REG(122)
	MOV_TO_REG(123)
	MOV_TO_REG(124)
	MOV_TO_REG(125)
	MOV_TO_REG(126)
	MOV_TO_REG(127)
END(asm_mov_to_reg)