/*
 * /ia64/kvm_ivt.S
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *	Asit Mallick <asit.k.mallick@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Kenneth Chen <kenneth.w.chen@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now
 *	    uses virtual PT.
 *
 * 07/6/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 *	    Supporting Intel virtualization architecture
 */

/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for
 * critical interruptions like TLB misses.
 *
 * For each entry, the comment is as follows:
 *
 *	// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *	entry offset ----/     /         /                  /  /
 *	entry number ---------/         /                  /  /
 *	size of the entry -------------/                  /  /
 *	vector name -------------------------------------/  /
 *	interruptions triggering this vector ----------------/
 *
 * The table is 32KB in size and must be aligned on 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address.)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */
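/*
 * Layout arithmetic: an IA-64 bundle is 16 bytes, so each of the 20
 * large entries spans 64 * 16 = 0x400 bytes and each of the 48 small
 * entries spans 16 * 16 = 0x100 bytes.  The .org directives below
 * follow directly from this: 20 * 0x400 + 48 * 0x100 = 0x8000 bytes
 * = 32KB, matching the .align 32768 of kvm_ia64_ivt.
 */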

#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/pgtable.h>

#include "asm-offsets.h"
#include "vcpu.h"
#include "kvm_minstate.h"
#include "vti.h"

#if 1
# define PSR_DEFAULT_BITS	psr.ac
#else
# define PSR_DEFAULT_BITS	0
#endif

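/*
 * KVM_FAULT(n): stub handler for vectors the VMM does not service.
 * It records the vector number in r19 and branches straight to
 * kvm_vmm_panic, so hitting one of these entries is always fatal to
 * the VMM.
 */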
#define KVM_FAULT(n)					\
	kvm_fault_##n:;					\
	mov r19=n;;					\
	br.sptk.many kvm_vmm_panic;			\
	;;

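/*
 * KVM_REFLECT(n): if the interruption arrived from guest context
 * (cr.ipsr.vm set), reflect the fault back to the guest via
 * kvm_dispatch_reflection; a fault taken while already running VMM
 * code (cr.ipsr.vm clear) can only be a VMM bug, so panic instead.
 */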
#define KVM_REFLECT(n)					\
	mov r31=pr;					\
	mov r19=n;	/* prepare to save predicates */ \
	mov r29=cr.ipsr;				\
	;;						\
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;		\
(p7)	br.sptk.many kvm_dispatch_reflection;		\
	br.sptk.many kvm_vmm_panic;

GLOBAL_ENTRY(kvm_vmm_panic)
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,1,0
	mov out0=r15
	adds r3=8,r2		// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	br.call.sptk.many b6=vmm_panic_handler;
END(kvm_vmm_panic)
	.section .text.ivt,"ax"

	.align 32768		// align on 32KB boundary
	.global kvm_ia64_ivt
kvm_ia64_ivt:
///////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(kvm_vhpt_miss)
	KVM_FAULT(0)
END(kvm_vhpt_miss)


	.org kvm_ia64_ivt+0x400
////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(kvm_itlb_miss)
	mov r31=pr
	mov r29=cr.ipsr
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT
(p6)	br.sptk kvm_alt_itlb_miss
	mov r19=1
	br.sptk kvm_itlb_miss_dispatch
	KVM_FAULT(1)
END(kvm_itlb_miss)

	.org kvm_ia64_ivt+0x0800
//////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(kvm_dtlb_miss)
	mov r31=pr
	mov r29=cr.ipsr
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT
(p6)	br.sptk kvm_alt_dtlb_miss
	br.sptk kvm_dtlb_miss_dispatch
END(kvm_dtlb_miss)

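/*
 * The two "alternate" miss handlers below service the VMM's own TLB
 * misses (taken with PSR.vm cleared).  They build an identity-mapped
 * translation by hand: the faulting address from cr.ifa is masked down
 * to its physical page frame, PAGE_KERNEL attribute bits are OR-ed in,
 * the page size is set to one granule via cr.itir, and the entry is
 * installed with itc.i/itc.d before returning with rfi.
 */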
	.org kvm_ia64_ivt+0x0c00
////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(kvm_alt_itlb_miss)
	mov r16=cr.ifa		// get address that caused the TLB miss
	;;
	movl r17=PAGE_KERNEL
	mov r24=cr.ipsr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	;;
	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
	;;
	or r19=r17,r19		// insert PTE control bits into r19
	;;
	movl r20=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r20
	;;
	itc.i r19		// insert the TLB entry
	mov pr=r31,-1
	rfi
END(kvm_alt_itlb_miss)

	.org kvm_ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(kvm_alt_dtlb_miss)
	mov r16=cr.ifa		// get address that caused the TLB miss
	;;
	movl r17=PAGE_KERNEL
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r24=cr.ipsr
	;;
	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
	;;
	or r19=r19,r17		// insert PTE control bits into r19
	;;
	movl r20=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r20
	;;
	itc.d r19		// insert the TLB entry
	mov pr=r31,-1
	rfi
END(kvm_alt_dtlb_miss)

	.org kvm_ia64_ivt+0x1400
//////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(kvm_nested_dtlb_miss)
	KVM_FAULT(5)
END(kvm_nested_dtlb_miss)

	.org kvm_ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(kvm_ikey_miss)
	KVM_REFLECT(6)
END(kvm_ikey_miss)

	.org kvm_ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(kvm_dkey_miss)
	KVM_REFLECT(7)
END(kvm_dkey_miss)

	.org kvm_ia64_ivt+0x2000
////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(kvm_dirty_bit)
	KVM_REFLECT(8)
END(kvm_dirty_bit)

	.org kvm_ia64_ivt+0x2400
////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(kvm_iaccess_bit)
	KVM_REFLECT(9)
END(kvm_iaccess_bit)

	.org kvm_ia64_ivt+0x2800
///////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(kvm_daccess_bit)
	KVM_REFLECT(10)
END(kvm_daccess_bit)

	.org kvm_ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(kvm_break_fault)
	mov r31=pr
	mov r19=11
	mov r29=cr.ipsr
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	;;
	alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
	mov out0=cr.ifa
	mov out2=cr.isr		// FIXME: pity to make this slow access twice
	mov out3=cr.iim		// FIXME: pity to make this slow access twice
	adds r3=8,r2		// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	adds out1=16,sp
	br.call.sptk.many b6=kvm_ia64_handle_break
	;;
END(kvm_break_fault)

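/*
 * External interrupts taken while the guest is running (PSR.vm set)
 * are handed to kvm_dispatch_interrupt.  Interrupts taken in VMM
 * context fall through to the open-coded save sequence below, which
 * builds a pt_regs frame on the VMM stack by hand (essentially an
 * inlined equivalent of KVM_SAVE_MIN plus KVM_SAVE_REST) before
 * calling kvm_ia64_handle_irq via the ia64_leave_nested return path.
 */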
	.org kvm_ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(kvm_interrupt)
	mov r31=pr		// prepare to save predicates
	mov r19=12
	mov r29=cr.ipsr
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT
	tbit.z p0,p15=r29,IA64_PSR_I_BIT
	;;
(p7)	br.sptk kvm_dispatch_interrupt
	;;
	mov r27=ar.rsc		/* M */
	mov r20=r1		/* A */
	mov r25=ar.unat		/* M */
	mov r26=ar.pfs		/* I */
	mov r28=cr.iip		/* M */
	cover			/* B (or nothing) */
	;;
	mov r1=sp
	;;
	invala			/* M */
	mov r30=cr.ifs
	;;
	addl r1=-VMM_PT_REGS_SIZE,r1
	;;
	adds r17=2*L1_CACHE_BYTES,r1	/* really: biggest cache-line size */
	adds r16=PT(CR_IPSR),r1
	;;
	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
	st8 [r16]=r29		/* save cr.ipsr */
	;;
	lfetch.fault.excl.nt1 [r17]
	mov r29=b0
	;;
	adds r16=PT(R8),r1	/* initialize first base pointer */
	adds r17=PT(R9),r1	/* initialize second base pointer */
	mov r18=r0		/* make sure r18 isn't NaT */
	;;
.mem.offset 0,0; st8.spill [r16]=r8,16
.mem.offset 8,0; st8.spill [r17]=r9,16
	;;
.mem.offset 0,0; st8.spill [r16]=r10,24
.mem.offset 8,0; st8.spill [r17]=r11,24
	;;
	st8 [r16]=r28,16	/* save cr.iip */
	st8 [r17]=r30,16	/* save cr.ifs */
	mov r8=ar.fpsr		/* M */
	mov r9=ar.csd
	mov r10=ar.ssd
	movl r11=FPSR_DEFAULT	/* L-unit */
	;;
	st8 [r16]=r25,16	/* save ar.unat */
	st8 [r17]=r26,16	/* save ar.pfs */
	shl r18=r18,16		/* compute ar.rsc to be used for "loadrs" */
	;;
	st8 [r16]=r27,16	/* save ar.rsc */
	adds r17=16,r17		/* skip over ar_rnat field */
	;;
	st8 [r17]=r31,16	/* save predicates */
	adds r16=16,r16		/* skip over ar_bspstore field */
	;;
	st8 [r16]=r29,16	/* save b0 */
	st8 [r17]=r18,16	/* save ar.rsc value for "loadrs" */
	;;
.mem.offset 0,0; st8.spill [r16]=r20,16	/* save original r1 */
.mem.offset 8,0; st8.spill [r17]=r12,16
	adds r12=-16,r1		/* switch to kernel memory stack (with 16 bytes of scratch) */
	;;
.mem.offset 0,0; st8.spill [r16]=r13,16
.mem.offset 8,0; st8.spill [r17]=r8,16	/* save ar.fpsr */
	;;
.mem.offset 0,0; st8.spill [r16]=r15,16
.mem.offset 8,0; st8.spill [r17]=r14,16
	dep r14=-1,r0,60,4
	;;
.mem.offset 0,0; st8.spill [r16]=r2,16
.mem.offset 8,0; st8.spill [r17]=r3,16
	adds r2=VMM_PT_REGS_R16_OFFSET,r1
	adds r14=VMM_VCPU_GP_OFFSET,r13
	;;
	mov r8=ar.ccv
	ld8 r14=[r14]
	;;
	mov r1=r14		/* establish kernel global pointer */
	;;
	bsw.1
	;;
	alloc r14=ar.pfs,0,0,1,0	// must be first in an insn group
	mov out0=r13
	;;
	ssm psr.ic
	;;
	srlz.i
	;;
	//(p15) ssm psr.i
	adds r3=8,r2		// set up second base pointer for SAVE_REST
	srlz.i			// ensure everybody knows psr.ic is back on
	;;
.mem.offset 0,0; st8.spill [r2]=r16,16
.mem.offset 8,0; st8.spill [r3]=r17,16
	;;
.mem.offset 0,0; st8.spill [r2]=r18,16
.mem.offset 8,0; st8.spill [r3]=r19,16
	;;
.mem.offset 0,0; st8.spill [r2]=r20,16
.mem.offset 8,0; st8.spill [r3]=r21,16
	mov r18=b6
	;;
.mem.offset 0,0; st8.spill [r2]=r22,16
.mem.offset 8,0; st8.spill [r3]=r23,16
	mov r19=b7
	;;
.mem.offset 0,0; st8.spill [r2]=r24,16
.mem.offset 8,0; st8.spill [r3]=r25,16
	;;
.mem.offset 0,0; st8.spill [r2]=r26,16
.mem.offset 8,0; st8.spill [r3]=r27,16
	;;
.mem.offset 0,0; st8.spill [r2]=r28,16
.mem.offset 8,0; st8.spill [r3]=r29,16
	;;
.mem.offset 0,0; st8.spill [r2]=r30,16
.mem.offset 8,0; st8.spill [r3]=r31,32
	;;
	mov ar.fpsr=r11		/* M-unit */
	st8 [r2]=r8,8		/* ar.ccv */
	adds r24=PT(B6)-PT(F7),r3
	;;
	stf.spill [r2]=f6,32
	stf.spill [r3]=f7,32
	;;
	stf.spill [r2]=f8,32
	stf.spill [r3]=f9,32
	;;
	stf.spill [r2]=f10
	stf.spill [r3]=f11
	adds r25=PT(B7)-PT(F11),r3
	;;
	st8 [r24]=r18,16	/* b6 */
	st8 [r25]=r19,16	/* b7 */
	;;
	st8 [r24]=r9		/* ar.csd */
	st8 [r25]=r10		/* ar.ssd */
	;;
	srlz.d			// make sure we see the effect of cr.ivr
	addl r14=@gprel(ia64_leave_nested),gp
	;;
	mov rp=r14
	br.call.sptk.many b6=kvm_ia64_handle_irq
	;;
END(kvm_interrupt)

	.global kvm_dispatch_vexirq
	.org kvm_ia64_ivt+0x3400
//////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
ENTRY(kvm_virtual_exirq)
	mov r31=pr
	mov r19=13
	mov r30=r0
	;;
kvm_dispatch_vexirq:
	cmp.eq p6,p0=1,r30
	;;
(p6)	adds r29=VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
(p6)	ld8 r1=[r29]
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,1,0
	mov out0=r13

	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	adds r3=8,r2		// set up second base pointer
	;;
	KVM_SAVE_REST
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	mov rp=r14
	br.call.sptk.many b6=kvm_vexirq
END(kvm_virtual_exirq)

	.org kvm_ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
	KVM_FAULT(14)
	// this code segment is from 2.6.16.13


	.org kvm_ia64_ivt+0x3c00
///////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
	KVM_FAULT(15)


	.org kvm_ia64_ivt+0x4000
///////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
	KVM_FAULT(16)

	.org kvm_ia64_ivt+0x4400
//////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	KVM_FAULT(17)

	.org kvm_ia64_ivt+0x4800
//////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
	KVM_FAULT(18)

	.org kvm_ia64_ivt+0x4c00
//////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	KVM_FAULT(19)

	.org kvm_ia64_ivt+0x5000
//////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present
ENTRY(kvm_page_not_present)
	KVM_REFLECT(20)
END(kvm_page_not_present)

	.org kvm_ia64_ivt+0x5100
///////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
ENTRY(kvm_key_permission)
	KVM_REFLECT(21)
END(kvm_key_permission)

	.org kvm_ia64_ivt+0x5200
//////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(kvm_iaccess_rights)
	KVM_REFLECT(22)
END(kvm_iaccess_rights)

	.org kvm_ia64_ivt+0x5300
//////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(kvm_daccess_rights)
	KVM_REFLECT(23)
END(kvm_daccess_rights)

	.org kvm_ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(kvm_general_exception)
	KVM_REFLECT(24)
	KVM_FAULT(24)
END(kvm_general_exception)

	.org kvm_ia64_ivt+0x5500
//////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(kvm_disabled_fp_reg)
	KVM_REFLECT(25)
END(kvm_disabled_fp_reg)

	.org kvm_ia64_ivt+0x5600
////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(kvm_nat_consumption)
	KVM_REFLECT(26)
END(kvm_nat_consumption)

	.org kvm_ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(kvm_speculation_vector)
	KVM_REFLECT(27)
END(kvm_speculation_vector)

	.org kvm_ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
	KVM_FAULT(28)

	.org kvm_ia64_ivt+0x5900
///////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(kvm_debug_vector)
	KVM_FAULT(29)
END(kvm_debug_vector)

	.org kvm_ia64_ivt+0x5a00
///////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(kvm_unaligned_access)
	KVM_REFLECT(30)
END(kvm_unaligned_access)

	.org kvm_ia64_ivt+0x5b00
//////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(kvm_unsupported_data_reference)
	KVM_REFLECT(31)
END(kvm_unsupported_data_reference)

	.org kvm_ia64_ivt+0x5c00
////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
ENTRY(kvm_floating_point_fault)
	KVM_REFLECT(32)
END(kvm_floating_point_fault)

	.org kvm_ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(kvm_floating_point_trap)
	KVM_REFLECT(33)
END(kvm_floating_point_trap)

	.org kvm_ia64_ivt+0x5e00
//////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(kvm_lower_privilege_trap)
	KVM_REFLECT(34)
END(kvm_lower_privilege_trap)

	.org kvm_ia64_ivt+0x5f00
//////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(kvm_taken_branch_trap)
	KVM_REFLECT(35)
END(kvm_taken_branch_trap)

	.org kvm_ia64_ivt+0x6000
////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(kvm_single_step_trap)
	KVM_REFLECT(36)
END(kvm_single_step_trap)
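/*
 * The virtualization fault is raised for virtualization-sensitive
 * instructions executed by the guest.  The handler first saves the
 * guest's gp and loads the VMM's own, then tries to decode the cause
 * code in r24 into one of the hand-written fast-path emulators
 * (kvm_asm_*).  Anything not handled there falls back to
 * kvm_virtualization_fault_back, which records cause and opcode in
 * the vcpu and takes the slow path through
 * kvm_dispatch_virtualization_fault to the C emulator.
 */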
	.global kvm_virtualization_fault_back
	.org kvm_ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
ENTRY(kvm_virtualization_fault)
	mov r31=pr
	adds r16=VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	st8 [r16]=r1
	adds r17=VMM_VCPU_GP_OFFSET,r21
	;;
	ld8 r1=[r17]
	cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
	cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
	cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
	cmp.eq p9,p0=EVENT_RSM,r24
	cmp.eq p10,p0=EVENT_SSM,r24
	cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
	cmp.eq p12,p0=EVENT_THASH,r24
(p6)	br.dptk.many kvm_asm_mov_from_ar
(p7)	br.dptk.many kvm_asm_mov_from_rr
(p8)	br.dptk.many kvm_asm_mov_to_rr
(p9)	br.dptk.many kvm_asm_rsm
(p10)	br.dptk.many kvm_asm_ssm
(p11)	br.dptk.many kvm_asm_mov_to_psr
(p12)	br.dptk.many kvm_asm_thash
	;;
kvm_virtualization_fault_back:
	adds r16=VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	ld8 r1=[r16]
	;;
	mov r19=37
	adds r16=VMM_VCPU_CAUSE_OFFSET,r21
	adds r17=VMM_VCPU_OPCODE_OFFSET,r21
	;;
	st8 [r16]=r24
	st8 [r17]=r25
	;;
	cmp.ne p6,p0=EVENT_RFI,r24
(p6)	br.sptk kvm_dispatch_virtualization_fault
	;;
	adds r18=VMM_VPD_BASE_OFFSET,r21
	;;
	ld8 r18=[r18]
	;;
	adds r18=VMM_VPD_VIFS_OFFSET,r18
	;;
	ld8 r18=[r18]
	;;
	tbit.z p6,p0=r18,63
(p6)	br.sptk kvm_dispatch_virtualization_fault
	;;
	// if vifs.v=1, discard the current register frame
	alloc r18=ar.pfs,0,0,0,0
	br.sptk kvm_dispatch_virtualization_fault
END(kvm_virtualization_fault)

	.org kvm_ia64_ivt+0x6200
//////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	KVM_FAULT(38)

	.org kvm_ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	KVM_FAULT(39)

	.org kvm_ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	KVM_FAULT(40)

	.org kvm_ia64_ivt+0x6500
//////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	KVM_FAULT(41)

	.org kvm_ia64_ivt+0x6600
//////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	KVM_FAULT(42)

	.org kvm_ia64_ivt+0x6700
//////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	KVM_FAULT(43)

	.org kvm_ia64_ivt+0x6800
//////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	KVM_FAULT(44)

	.org kvm_ia64_ivt+0x6900
///////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception
// (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(kvm_ia32_exception)
	KVM_FAULT(45)
END(kvm_ia32_exception)

	.org kvm_ia64_ivt+0x6a00
////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(kvm_ia32_intercept)
	KVM_FAULT(46)
END(kvm_ia32_intercept)

	.org kvm_ia64_ivt+0x6b00
/////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(kvm_ia32_interrupt)
	KVM_FAULT(47)
END(kvm_ia32_interrupt)

	.org kvm_ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	KVM_FAULT(48)

	.org kvm_ia64_ivt+0x6d00
//////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	KVM_FAULT(49)

	.org kvm_ia64_ivt+0x6e00
//////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	KVM_FAULT(50)

	.org kvm_ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	KVM_FAULT(51)

	.org kvm_ia64_ivt+0x7000
////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
	KVM_FAULT(52)

	.org kvm_ia64_ivt+0x7100
////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	KVM_FAULT(53)

	.org kvm_ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	KVM_FAULT(54)

	.org kvm_ia64_ivt+0x7300
////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	KVM_FAULT(55)

	.org kvm_ia64_ivt+0x7400
////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	KVM_FAULT(56)

	.org kvm_ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	KVM_FAULT(57)

	.org kvm_ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	KVM_FAULT(58)

	.org kvm_ia64_ivt+0x7700
////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	KVM_FAULT(59)

	.org kvm_ia64_ivt+0x7800
////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	KVM_FAULT(60)

	.org kvm_ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	KVM_FAULT(61)

	.org kvm_ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	KVM_FAULT(62)

	.org kvm_ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	KVM_FAULT(63)

	.org kvm_ia64_ivt+0x7c00
////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	KVM_FAULT(64)

	.org kvm_ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	KVM_FAULT(65)

	.org kvm_ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	KVM_FAULT(66)

	.org kvm_ia64_ivt+0x7f00
////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	KVM_FAULT(67)

	.org kvm_ia64_ivt+0x8000
// There is no particular reason for this code to be here, other than that
// there happens to be space here that would go unused otherwise.  If this
// fault ever gets "unreserved", simply move the following code to a more
// suitable spot...

ENTRY(kvm_dtlb_miss_dispatch)
	mov r19=2
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=r15
	adds r3=8,r2		// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
	;;
	KVM_SAVE_REST
	KVM_SAVE_EXTRA
	mov rp=r14
	;;
	adds out2=16,r12
	br.call.sptk.many b6=kvm_page_fault
END(kvm_dtlb_miss_dispatch)

ENTRY(kvm_itlb_miss_dispatch)

	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=r15
	adds r3=8,r2		// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	adds out2=16,r12
	br.call.sptk.many b6=kvm_page_fault
END(kvm_itlb_miss_dispatch)

ENTRY(kvm_dispatch_reflection)
	/*
	 * Input:
	 *	psr.ic:	off
	 *	r19:	intr type (offset into ivt, see ia64_int.h)
	 *	r31:	contains saved predicates (pr)
	 */
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out0=cr.ifa
	mov out1=cr.isr
	mov out2=cr.iim
	mov out3=r15
	adds r3=8,r2		// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	adds out4=16,r12
	br.call.sptk.many b6=reflect_interruption
END(kvm_dispatch_reflection)

ENTRY(kvm_dispatch_virtualization_fault)
	adds r16=VMM_VCPU_CAUSE_OFFSET,r21
	adds r17=VMM_VCPU_OPCODE_OFFSET,r21
	;;
	st8 [r16]=r24
	st8 [r17]=r25
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	;;
	alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
	mov out0=r13		// vcpu
	adds r3=8,r2		// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
	;;
	KVM_SAVE_REST
	KVM_SAVE_EXTRA
	mov rp=r14
	;;
	adds out1=16,sp		// regs
	br.call.sptk.many b6=kvm_emulate
END(kvm_dispatch_virtualization_fault)


ENTRY(kvm_dispatch_interrupt)
	KVM_SAVE_MIN_WITH_COVER_R19	// uses r31; defines r2 and r3
	;;
	alloc r14=ar.pfs,0,0,1,0	// must be first in an insn group
	//mov out0=cr.ivr		// pass cr.ivr as first arg
	adds r3=8,r2		// set up second base pointer for SAVE_REST
	;;
	ssm psr.ic
	;;
	srlz.i
	;;
	//(p15) ssm psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	mov out0=r13		// pass the vcpu pointer (r13) as first arg
	br.call.sptk.many b6=kvm_ia64_handle_irq
END(kvm_dispatch_interrupt)


GLOBAL_ENTRY(ia64_leave_nested)
	rsm psr.i
	;;
	adds r21=PT(PR)+16,r12
	;;
	lfetch [r21],PT(CR_IPSR)-PT(PR)
	adds r2=PT(B6)+16,r12
	adds r3=PT(R16)+16,r12
	;;
	lfetch [r21]
	ld8 r28=[r2],8		// load b6
	adds r29=PT(R24)+16,r12

	ld8.fill r16=[r3]
	adds r3=PT(AR_CSD)-PT(R16),r3
	adds r30=PT(AR_CCV)+16,r12
	;;
	ld8.fill r24=[r29]
	ld8 r15=[r30]		// load ar.ccv
	;;
	ld8 r29=[r2],16		// load b7
	ld8 r30=[r3],16		// load ar.csd
	;;
	ld8 r31=[r2],16		// load ar.ssd
	ld8.fill r8=[r3],16
	;;
	ld8.fill r9=[r2],16
	ld8.fill r10=[r3],PT(R17)-PT(R10)
	;;
	ld8.fill r11=[r2],PT(R18)-PT(R11)
	ld8.fill r17=[r3],16
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	mov ar.csd=r30
	mov ar.ssd=r31
	;;
	rsm psr.i | psr.ic
	// initiate turning off of interrupt and interruption collection
	invala			// invalidate ALAT
	;;
	srlz.i
	;;
	ld8.fill r22=[r2],24
	ld8.fill r23=[r3],24
	mov b6=r28
	;;
	ld8.fill r25=[r2],16
	ld8.fill r26=[r3],16
	mov b7=r29
	;;
	ld8.fill r27=[r2],16
	ld8.fill r28=[r3],16
	;;
	ld8.fill r29=[r2],16
	ld8.fill r30=[r3],24
	;;
	ld8.fill r31=[r2],PT(F9)-PT(R31)
	adds r3=PT(F10)-PT(F6),r3
	;;
	ldf.fill f9=[r2],PT(F6)-PT(F9)
	ldf.fill f10=[r3],PT(F8)-PT(F10)
	;;
	ldf.fill f6=[r2],PT(F7)-PT(F6)
	;;
	ldf.fill f7=[r2],PT(F11)-PT(F7)
	ldf.fill f8=[r3],32
	;;
	srlz.i			// ensure interruption collection is off
	mov ar.ccv=r15
	;;
	bsw.0	// switch back to bank 0 (no stop bit required beforehand...)
	;;
	ldf.fill f11=[r2]
//	mov r18=r13
//	mov r21=r13
	adds r16=PT(CR_IPSR)+16,r12
	adds r17=PT(CR_IIP)+16,r12
	;;
	ld8 r29=[r16],16	// load cr.ipsr
	ld8 r28=[r17],16	// load cr.iip
	;;
	ld8 r30=[r16],16	// load cr.ifs
	ld8 r25=[r17],16	// load ar.unat
	;;
	ld8 r26=[r16],16	// load ar.pfs
	ld8 r27=[r17],16	// load ar.rsc
	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
	;;
	ld8 r24=[r16],16	// load ar.rnat (may be garbage)
	ld8 r23=[r17],16	// load ar.bspstore (may be garbage)
	;;
	ld8 r31=[r16],16	// load predicates
	ld8 r22=[r17],16	// load b0
	;;
	ld8 r19=[r16],16	// load ar.rsc value for "loadrs"
	ld8.fill r1=[r17],16	// load r1
	;;
	ld8.fill r12=[r16],16
	ld8.fill r13=[r17],16
	;;
	ld8 r20=[r16],16	// ar.fpsr
	ld8.fill r15=[r17],16
	;;
	ld8.fill r14=[r16],16
	ld8.fill r2=[r17]
	;;
	ld8.fill r3=[r16]
	;;
	mov r16=ar.bsp		// get existing backing store pointer
	;;
	mov b0=r22
	mov ar.pfs=r26
	mov cr.ifs=r30
	mov cr.ipsr=r29
	mov ar.fpsr=r20
	mov cr.iip=r28
	;;
	mov ar.rsc=r27
	mov ar.unat=r25
	mov pr=r31,-1
	rfi
END(ia64_leave_nested)


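/*
 * VMM exit path: ia64_leave_hypervisor_prepare restores the extra
 * state saved by KVM_SAVE_EXTRA (r4-r7 and the extended unat) and
 * falls through into ia64_leave_hypervisor, which calls
 * leave_hypervisor_tail, reloads the full pt_regs frame, clears the
 * invalid partition of the register backing store, and finally
 * branches to the VPS sync/resume services (kvm_vps_sync_write,
 * kvm_vps_resume_*) to re-enter the guest through ia64_vmm_entry.
 */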
GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it
	 * returns to user- or fsys-mode, hence we disable interrupts early on:
	 */
	adds r2=PT(R4)+16,r12
	adds r3=PT(R5)+16,r12
	adds r8=PT(EML_UNAT)+16,r12
	;;
	ld8 r8=[r8]
	;;
	mov ar.unat=r8
	;;
	ld8.fill r4=[r2],16	// load r4
	ld8.fill r5=[r3],16	// load r5
	;;
	ld8.fill r6=[r2]	// load r6
	ld8.fill r7=[r3]	// load r7
	;;
END(ia64_leave_hypervisor_prepare)
// fall through
GLOBAL_ENTRY(ia64_leave_hypervisor)
	rsm psr.i
	;;
	br.call.sptk.many b0=leave_hypervisor_tail
	;;
	adds r20=PT(PR)+16,r12
	adds r8=PT(EML_UNAT)+16,r12
	;;
	ld8 r8=[r8]
	;;
	mov ar.unat=r8
	;;
	lfetch [r20],PT(CR_IPSR)-PT(PR)
	adds r2=PT(B6)+16,r12
	adds r3=PT(B7)+16,r12
	;;
	lfetch [r20]
	;;
	ld8 r24=[r2],16		/* B6 */
	ld8 r25=[r3],16		/* B7 */
	;;
	ld8 r26=[r2],16		/* ar_csd */
	ld8 r27=[r3],16		/* ar_ssd */
	mov b6=r24
	;;
	ld8.fill r8=[r2],16
	ld8.fill r9=[r3],16
	mov b7=r25
	;;
	mov ar.csd=r26
	mov ar.ssd=r27
	;;
	ld8.fill r10=[r2],PT(R15)-PT(R10)
	ld8.fill r11=[r3],PT(R14)-PT(R11)
	;;
	ld8.fill r15=[r2],PT(R16)-PT(R15)
	ld8.fill r14=[r3],PT(R17)-PT(R14)
	;;
	ld8.fill r16=[r2],16
	ld8.fill r17=[r3],16
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	;;
	ld8.fill r22=[r2],16
	ld8.fill r23=[r3],16
	;;
	ld8.fill r24=[r2],16
	ld8.fill r25=[r3],16
	;;
	ld8.fill r26=[r2],16
	ld8.fill r27=[r3],16
	;;
	ld8.fill r28=[r2],16
	ld8.fill r29=[r3],16
	;;
	ld8.fill r30=[r2],PT(F6)-PT(R30)
	ld8.fill r31=[r3],PT(F7)-PT(R31)
	;;
	rsm psr.i | psr.ic
	// initiate turning off of interrupt and interruption collection
	invala			// invalidate ALAT
	;;
	srlz.i			// ensure interruption collection is off
	;;
	bsw.0
	;;
	adds r16=PT(CR_IPSR)+16,r12
	adds r17=PT(CR_IIP)+16,r12
	mov r21=r13		// get current
	;;
	ld8 r31=[r16],16	// load cr.ipsr
	ld8 r30=[r17],16	// load cr.iip
	;;
	ld8 r29=[r16],16	// load cr.ifs
	ld8 r28=[r17],16	// load ar.unat
	;;
	ld8 r27=[r16],16	// load ar.pfs
	ld8 r26=[r17],16	// load ar.rsc
	;;
	ld8 r25=[r16],16	// load ar.rnat
	ld8 r24=[r17],16	// load ar.bspstore
	;;
	ld8 r23=[r16],16	// load predicates
	ld8 r22=[r17],16	// load b0
	;;
	ld8 r20=[r16],16	// load ar.rsc value for "loadrs"
	ld8.fill r1=[r17],16	// load r1
	;;
	ld8.fill r12=[r16],16	// load r12
	ld8.fill r13=[r17],PT(R2)-PT(R13)	// load r13
	;;
	ld8 r19=[r16],PT(R3)-PT(AR_FPSR)	// load ar_fpsr
	ld8.fill r2=[r17],PT(AR_CCV)-PT(R2)	// load r2
	;;
	ld8.fill r3=[r16]	// load r3
	ld8 r18=[r17]		// load ar_ccv
	;;
	mov ar.fpsr=r19
	mov ar.ccv=r18
	shr.u r18=r20,16
	;;
kvm_rbs_switch:
	mov r19=96

kvm_dont_preserve_current_frame:
	/*
	 * To prevent leaking bits between the hypervisor and guest domain,
	 * we must clear the stacked registers in the "invalid" partition here.
	 * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
	 * 5 registers/cycle on McKinley).
	 */
#	define pRecurse	p6
#	define pReturn	p7
#	define Nregs	14

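	/*
	 * The recursive clear below works in chunks of Nregs stacked
	 * registers per activation: each alloc creates a fresh
	 * 14-register frame whose locals are zeroed by hand, and
	 * pRecurse re-invokes the routine while more than Nregs*8 bytes
	 * remain (tracked in in0).  On the way back up, pReturn issues
	 * one br.ret per recursion level counted in in1, leaving the
	 * whole invalid partition zeroed.
	 */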
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	shr.u loc1=r18,9	// RNaTslots <= floor(dirtySize / (64*8))
	sub r19=r19,r18		// r19 = (physStackedSize + 8) - dirtySize
	;;
	mov ar.rsc=r20		// load ar.rsc to be used for "loadrs"
	shladd in0=loc1,3,r19
	mov in1=0
	;;
	TEXT_ALIGN(32)
kvm_rse_clear_invalid:
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0
	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
	add out1=1,in1		// increment recursion count
	mov loc1=0
	mov loc2=0
	;;
	mov loc3=0
	mov loc4=0
	mov loc5=0
	mov loc6=0
	mov loc7=0
(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
	;;
	mov loc8=0
	mov loc9=0
	cmp.ne pReturn,p0=r0,in1
	// if recursion count != 0, we need to do a br.ret
	mov loc10=0
	mov loc11=0
(pReturn) br.ret.dptk.many b0

#	undef pRecurse
#	undef pReturn

	// loadrs has already been shifted
	alloc r16=ar.pfs,0,0,0,0	// drop current register frame
	;;
	loadrs
	;;
	mov ar.bspstore=r24
	;;
	mov ar.unat=r28
	mov ar.rnat=r25
	mov ar.rsc=r26
	;;
	mov cr.ipsr=r31
	mov cr.iip=r30
	mov cr.ifs=r29
	mov ar.pfs=r27
	adds r18=VMM_VPD_BASE_OFFSET,r21
	;;
	ld8 r18=[r18]		// vpd
	adds r17=VMM_VCPU_ISR_OFFSET,r21
	;;
	ld8 r17=[r17]
	adds r19=VMM_VPD_VPSR_OFFSET,r18
	;;
	ld8 r19=[r19]		// vpsr
	mov r25=r18
	adds r16=VMM_VCPU_GP_OFFSET,r21
	;;
	ld8 r16=[r16]		// load the VMM gp
	movl r24=@gprel(ia64_vmm_entry)	// calculate return address
	;;
	add r24=r24,r16		// r24 = address of ia64_vmm_entry
	;;
	br.sptk.many kvm_vps_sync_write	// call the service
	;;
END(ia64_leave_hypervisor)
// fall through
GLOBAL_ENTRY(ia64_vmm_entry)
	/*
	 * must be at bank 0
	 * parameter:
	 *	r17: cr.isr
	 *	r18: vpd
	 *	r19: vpsr
	 *	r22: b0
	 *	r23: predicate
	 */
	mov r24=r22
	mov r25=r18
	tbit.nz p1,p2=r19,IA64_PSR_IC_BIT	// p1=vpsr.ic
(p1)	br.cond.sptk.few kvm_vps_resume_normal
(p2)	br.cond.sptk.many kvm_vps_resume_handler
	;;
END(ia64_vmm_entry)


/*
 * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
 *			    u64 arg3, u64 arg4, u64 arg5,
 *			    u64 arg6, u64 arg7);
 *
 * XXX: The currently defined services use only 4 args at the max.  The
 * rest are not consumed.
 */
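/*
 * Calling convention used below: 'proc' (in0) is an offset that is
 * added to the per-vcpu VSA base to form the service entry point;
 * arg1-arg3 are passed in r25-r27.  The service's return value comes
 * back in r31 and is copied to r8 (the C return register) before
 * returning to the caller.
 */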
GLOBAL_ENTRY(ia64_call_vsa)
	.regstk 4,4,0,0

rpsave	= loc0
pfssave	= loc1
psrsave	= loc2
entry	= loc3
hostret	= r24

	alloc pfssave=ar.pfs,4,4,0,0
	mov rpsave=rp
	adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13
	;;
	ld8 entry=[entry]
1:	mov hostret=ip
	mov r25=in1		// copy arguments
	mov r26=in2
	mov r27=in3
	mov psrsave=psr
	;;
	tbit.nz p6,p0=psrsave,14	// IA64_PSR_I
	tbit.nz p7,p0=psrsave,13	// IA64_PSR_IC
	;;
	add hostret=2f-1b,hostret	// calculate return address
	add entry=entry,in0
	;;
	rsm psr.i | psr.ic
	;;
	srlz.i
	mov b6=entry
	br.cond.sptk b6		// call the service
2:
	// Architectural sequence for enabling interrupts if necessary
(p7)	ssm psr.ic
	;;
(p7)	srlz.i
	;;
	//(p6) ssm psr.i
	;;
	mov rp=rpsave
	mov ar.pfs=pfssave
	mov r8=r31
	;;
	srlz.d
	br.ret.sptk rp

END(ia64_call_vsa)

#define INIT_BSPSTORE	((4<<30)-(12<<20)-0x100)

GLOBAL_ENTRY(vmm_reset_entry)
	// set up ipsr, iip, vpd.vpsr, dcr
	// For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
	// For DCR: all bits 0
	bsw.0
	;;
	mov r21=r13
	adds r14=-VMM_PT_REGS_SIZE, r12
	;;
	movl r6=0x501008826000		// IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
	movl r10=0x8000000000000000
	adds r16=PT(CR_IIP), r14
	adds r20=PT(R1), r14
	;;
	rsm psr.ic | psr.i
	;;
	srlz.i
	;;
	mov ar.rsc=0
	;;
	flushrs
	;;
	mov ar.bspstore=0
	// clear BSPSTORE
	;;
	mov cr.ipsr=r6
	mov cr.ifs=r10
	ld8 r4=[r16]		// Set init iip for first run.
	ld8 r1=[r20]
	;;
	mov cr.iip=r4
	adds r16=VMM_VPD_BASE_OFFSET,r13
	;;
	ld8 r18=[r16]
	;;
	adds r19=VMM_VPD_VPSR_OFFSET,r18
	;;
	ld8 r19=[r19]
	mov r17=r0
	mov r22=r0
	mov r23=r0
	br.cond.sptk ia64_vmm_entry
	br.ret.sptk b0
END(vmm_reset_entry)