/*
 *  kvm_minstate.h: min save macros
 *  Copyright (c) 2007, Intel Corporation.
 *
 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 *  Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */


#include <asm/asmmacro.h>
#include <asm/types.h>
#include <asm/kregs.h>
#include "asm-offsets.h"

#define KVM_MINSTATE_START_SAVE_MIN					\
	mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\
	;;								\
	mov.m r28 = ar.rnat;						\
	addl r22 = VMM_RBS_OFFSET,r1;	     /* compute base of RBS */	\
	;;								\
	lfetch.fault.excl.nt1 [r22];					\
	addl r1 = IA64_STK_OFFSET-VMM_PT_REGS_SIZE,r1;	/* compute base of memory stack */  \
	mov r23 = ar.bspstore;			/* save ar.bspstore */	\
	;;								\
	mov ar.bspstore = r22;			/* switch to kernel RBS */\
	;;								\
	mov r18 = ar.bsp;						\
	mov ar.rsc = 0x3;     /* set eager mode, pl 0, little-endian, loadrs=0 */

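/*
 * After KVM_MINSTATE_START_SAVE_MIN the following live values are
 * consumed by KVM_DO_SAVE_MIN below:
 *  r18 = new ar.bsp (on the kernel RBS)
 *  r22 = base of the kernel RBS (the new ar.bspstore)
 *  r23 = saved (old) ar.bspstore
 *  r28 = saved ar.rnat
 */
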

#define KVM_MINSTATE_END_SAVE_MIN					\
	bsw.1;          /* switch back to bank 1 (must be last in insn group) */\
	;;


#define PAL_VSA_SYNC_READ						\
	/* begin to call pal vps sync_read */				\
	add r25 = VMM_VPD_BASE_OFFSET, r21;				\
	adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21;  /* entry point */	\
	;;								\
	ld8 r25 = [r25];      /* read vpd base */			\
	ld8 r20 = [r20];      /* read vsa base */			\
	;;								\
	add r20 = PAL_VPS_SYNC_READ,r20;				\
	;;								\
{ .mii;									\
	nop 0x0;							\
	mov r24 = ip;							\
	mov b0 = r20;							\
	;;								\
};									\
{ .mmb;									\
	add r24 = 0x20, r24;						\
	nop 0x0;							\
	br.cond.sptk b0;        /* call the service */			\
	;;								\
};
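
/*
 * Calling convention used above: b0 holds the VPS sync_read entry
 * point (vsa_base + PAL_VPS_SYNC_READ), r25 holds the vpd base, and
 * r24 holds the return address -- the ip of the .mii bundle plus
 * 0x20, i.e. the first bundle after the branch.
 */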
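/*
 * By VMM convention, r21 holds the pointer to the current vcpu
 * (see "establish `current'" in KVM_DO_SAVE_MIN below).
 */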
#define KVM_MINSTATE_GET_CURRENT(reg)	mov reg=r21

/*
 * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
 * the minimum state needed to turn psr.ic back on.
 *
 * Assumed state upon entry:
 *  psr.ic: off
 *  r31:	contains saved predicates (pr)
 *
 * Upon exit, the state is as follows:
 *  psr.ic: off
 *   r2 = address of pt_regs.r16
 *   r8 = contents of ar.ccv
 *   r9 = contents of ar.csd
 *  r10 = contents of ar.ssd
 *  r11 = FPSR_DEFAULT
 *  r12 = kernel sp (kernel virtual address)
 *  r13 = address of current task_struct (kernel virtual address)
 *  p15 = TRUE if psr.i is set in cr.ipsr
 *  predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
 *	  preserved
 *
 * Note that psr.ic is NOT turned on by this macro.  This is so that
 * we can pass interruption state as arguments to a handler.
 */

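/* Shorthand for the pt_regs field offsets generated into asm-offsets.h. */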
#define PT(f) (VMM_PT_REGS_##f##_OFFSET)

#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)			\
	KVM_MINSTATE_GET_CURRENT(r16);  /* M (or M;;I) */	\
	mov r27 = ar.rsc;         /* M */			\
	mov r20 = r1;         /* A */				\
	mov r25 = ar.unat;        /* M */			\
	mov r29 = cr.ipsr;        /* M */			\
	mov r26 = ar.pfs;         /* I */			\
	mov r18 = cr.isr;					\
	COVER;              /* B;; (or nothing) */		\
	;;							\
	tbit.z p0,p15 = r29,IA64_PSR_I_BIT;			\
	mov r1 = r16;						\
/*	mov r21=r16;	*/					\
	/* switch from user to kernel RBS: */			\
	;;							\
	invala;             /* M */				\
	SAVE_IFS;						\
	;;							\
	KVM_MINSTATE_START_SAVE_MIN				\
	adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */	\
	adds r16 = PT(CR_IPSR),r1;				\
	;;							\
	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;		\
	st8 [r16] = r29;      /* save cr.ipsr */		\
	;;							\
	lfetch.fault.excl.nt1 [r17];				\
	tbit.nz p15,p0 = r29,IA64_PSR_I_BIT;			\
	mov r29 = b0						\
	;;							\
	adds r16 = PT(R8),r1; /* initialize first base pointer */\
	adds r17 = PT(R9),r1; /* initialize second base pointer */\
	;;							\
.mem.offset 0,0; st8.spill [r16] = r8,16;			\
.mem.offset 8,0; st8.spill [r17] = r9,16;			\
	;;							\
.mem.offset 0,0; st8.spill [r16] = r10,24;			\
.mem.offset 8,0; st8.spill [r17] = r11,24;			\
	;;							\
	mov r9 = cr.iip;         /* M */			\
	mov r10 = ar.fpsr;        /* M */			\
	;;							\
	st8 [r16] = r9,16;    /* save cr.iip */			\
	st8 [r17] = r30,16;   /* save cr.ifs */			\
	sub r18 = r18,r22;    /* r18 = RSE.ndirty*8 */		\
	;;							\
	st8 [r16] = r25,16;   /* save ar.unat */		\
	st8 [r17] = r26,16;   /* save ar.pfs */			\
	shl r18 = r18,16;     /* compute ar.rsc value used for "loadrs" */\
	;;							\
	st8 [r16] = r27,16;   /* save ar.rsc */			\
	st8 [r17] = r28,16;   /* save ar.rnat */		\
	;;          /* avoid RAW on r16 & r17 */		\
	st8 [r16] = r23,16;   /* save ar.bspstore */		\
	st8 [r17] = r31,16;   /* save predicates */		\
	;;							\
	st8 [r16] = r29,16;   /* save b0 */			\
	st8 [r17] = r18,16;   /* save ar.rsc value for "loadrs" */\
	;;							\
.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */  \
.mem.offset 8,0; st8.spill [r17] = r12,16;			\
	adds r12 = -16,r1;    /* switch to kernel memory stack */  \
	;;							\
.mem.offset 0,0; st8.spill [r16] = r13,16;			\
.mem.offset 8,0; st8.spill [r17] = r10,16;	/* save ar.fpsr */\
	mov r13 = r21;   /* establish `current' */		\
	;;							\
.mem.offset 0,0; st8.spill [r16] = r15,16;			\
.mem.offset 8,0; st8.spill [r17] = r14,16;			\
	;;							\
.mem.offset 0,0; st8.spill [r16] = r2,16;			\
.mem.offset 8,0; st8.spill [r17] = r3,16;			\
	adds r2 = VMM_PT_REGS_R16_OFFSET,r1;			\
	;;							\
	adds r16 = VMM_VCPU_IIPA_OFFSET,r13;			\
	adds r17 = VMM_VCPU_ISR_OFFSET,r13;			\
	mov r26 = cr.iipa;					\
	mov r27 = cr.isr;					\
	;;							\
	st8 [r16] = r26;      /* save cr.iipa */		\
	st8 [r17] = r27;      /* save cr.isr */			\
	;;							\
	EXTRA;							\
	mov r8 = ar.ccv;					\
	mov r9 = ar.csd;					\
	mov r10 = ar.ssd;					\
	movl r11 = FPSR_DEFAULT;   /* L-unit */			\
	adds r17 = VMM_VCPU_GP_OFFSET,r13;			\
	;;							\
	ld8 r1 = [r17];       /* establish kernel global pointer */\
	;;							\
	PAL_VSA_SYNC_READ					\
	KVM_MINSTATE_END_SAVE_MIN

/*
 * KVM_SAVE_REST saves the remainder of pt_regs (with psr.ic on).
 *
 * Assumed state upon entry:
 *  psr.ic: on
 *  r2: points to &pt_regs.r16
 *  r3: points to &pt_regs.r17
 *  r8: contents of ar.ccv
 *  r9: contents of ar.csd
 *  r10:	contents of ar.ssd
 *  r11:	FPSR_DEFAULT
 *
 * Registers r14 and r15 are guaranteed not to be touched by KVM_SAVE_REST.
 */
#define KVM_SAVE_REST				\
.mem.offset 0,0; st8.spill [r2] = r16,16;	\
.mem.offset 8,0; st8.spill [r3] = r17,16;	\
	;;					\
.mem.offset 0,0; st8.spill [r2] = r18,16;	\
.mem.offset 8,0; st8.spill [r3] = r19,16;	\
	;;					\
.mem.offset 0,0; st8.spill [r2] = r20,16;	\
.mem.offset 8,0; st8.spill [r3] = r21,16;	\
	mov r18 = b6;				\
	;;					\
.mem.offset 0,0; st8.spill [r2] = r22,16;	\
.mem.offset 8,0; st8.spill [r3] = r23,16;	\
	mov r19 = b7;				\
	;;					\
.mem.offset 0,0; st8.spill [r2] = r24,16;	\
.mem.offset 8,0; st8.spill [r3] = r25,16;	\
	;;					\
.mem.offset 0,0; st8.spill [r2] = r26,16;	\
.mem.offset 8,0; st8.spill [r3] = r27,16;	\
	;;					\
.mem.offset 0,0; st8.spill [r2] = r28,16;	\
.mem.offset 8,0; st8.spill [r3] = r29,16;	\
	;;					\
.mem.offset 0,0; st8.spill [r2] = r30,16;	\
.mem.offset 8,0; st8.spill [r3] = r31,32;	\
	;;					\
	mov ar.fpsr = r11;			\
	st8 [r2] = r8,8;	/* ar.ccv */	\
	adds r24 = PT(B6)-PT(F7),r3;		\
	adds r25 = PT(B7)-PT(F7),r3;		\
	;;					\
	st8 [r24] = r18,16;       /* b6 */	\
	st8 [r25] = r19,16;       /* b7 */	\
	adds r2 = PT(R4)-PT(F6),r2;		\
	adds r3 = PT(R5)-PT(F7),r3;		\
	;;					\
	st8 [r24] = r9;		/* ar.csd */	\
	st8 [r25] = r10;	/* ar.ssd */	\
	;;					\
	mov r18 = ar.unat;			\
	adds r19 = PT(EML_UNAT)-PT(R4),r2;	\
	;;					\
	st8 [r19] = r18;	/* eml_unat */


#define KVM_SAVE_EXTRA				\
.mem.offset 0,0; st8.spill [r2] = r4,16;	\
.mem.offset 8,0; st8.spill [r3] = r5,16;	\
	;;					\
.mem.offset 0,0; st8.spill [r2] = r6,16;	\
.mem.offset 8,0; st8.spill [r3] = r7;		\
	;;					\
	mov r26 = ar.unat;			\
	;;					\
	st8 [r2] = r26;	/* eml_unat */

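/*
 * The three flavors below differ only in their COVER/SAVE_IFS/EXTRA
 * arguments: the WITH_COVER variants issue "cover" and save cr.ifs
 * through r30, the _R19 variant additionally copies r19 into r15,
 * and plain KVM_SAVE_MIN stores zero (r0) in the cr.ifs slot.
 */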
#define KVM_SAVE_MIN_WITH_COVER		KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,)
#define KVM_SAVE_MIN_WITH_COVER_R19	KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19)
#define KVM_SAVE_MIN			KVM_DO_SAVE_MIN(     , mov r30 = r0, )
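
/*
 * Illustrative sketch of intended use (not part of this header; the
 * real interruption handlers live in the VMM ivt code).  A handler
 * saves the minimal state, turns psr.ic back on, serializes, and
 * only then saves the rest:
 *
 *	KVM_SAVE_MIN_WITH_COVER
 *	;;
 *	ssm psr.ic
 *	;;
 *	srlz.i			// make psr.ic visible
 *	;;
 *	adds r3 = 8,r2		// r3 = &pt_regs.r17
 *	;;
 *	KVM_SAVE_REST
 */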