KVM: PPC: Paravirtualize SPRG4-7, ESR, PIR, MASn

This allows additional registers to be accessed by the guest
in PR-mode KVM without trapping.

SPRG4-7 are readable from userspace.  On booke, KVM will sync
these registers into the hardware when it enters the guest, so that
accesses from guest userspace will work.  The guest kernel, on the
other hand, must consistently use either the real registers or the
shared area between exits; mixing the two can yield stale values,
because reads of these registers do not trap.  This also applies to
the already-paravirtualized SPRG3.
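
To make the hazard concrete, here is a toy userspace model (not code
from this patch; both variables are made up) of the real register
versus its magic-page copy:

    #include <stdio.h>
    #include <stdint.h>

    /* Toy model: a patched mtspr updates only the shared (magic page)
     * copy.  KVM resyncs the real SPRG4 on guest entry, not on every
     * access, because reads of SPRG4-7 do not trap.
     */
    static uint64_t real_sprg4;    /* stands in for the hardware register */
    static uint64_t shared_sprg4;  /* stands in for the magic-page copy */

    int main(void)
    {
        shared_sprg4 = 0x1234;  /* a patched "mtspr SPRN_SPRG4, rN" */

        /* A real mfspr at this point would still see the stale
         * hardware value; only the shared copy is current until the
         * next guest entry syncs it back.
         */
        printf("real=%#lx shared=%#lx\n",
               (unsigned long)real_sprg4,
               (unsigned long)shared_sprg4);
        return 0;
    }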

On non-booke, it's not clear to what extent SPRG4-7 are supported
(they're not architected for book3s, but exist on at least some
classic chips).  They are copied in the get/set regs ioctls, but I do
not see any non-booke emulation.  I also do not see any syncing with
the real registers (in PR-mode), including for the user-readable
SPRG3.  This patch should not make that situation any worse.
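
For reference, the KVM_INST_SPR() encoding introduced below can be
checked in isolation against the hard-coded constants it replaces.
A standalone sketch (the SPRN values are copied from
arch/powerpc/include/asm/reg.h):

    #include <assert.h>
    #include <stdio.h>

    /* Standalone check of the mfspr/mtspr encoding: the low five bits
     * of the SPR number land at word bits 16-20, the high five at
     * bits 11-15, and mtspr differs from mfspr by bit 0x100.
     */
    #define SPR_FROM	0
    #define SPR_TO	0x100

    #define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
                                        (((sprn) & 0x1f) << 16) | \
                                        (((sprn) & 0x3e0) << 6) | \
                                        (moveto))

    #define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
    #define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

    #define SPRN_SPRG0	0x110
    #define SPRN_SRR0	0x01a

    int main(void)
    {
        /* Match the constants removed by this patch. */
        assert(KVM_INST_MFSPR(SPRN_SPRG0) == 0x7c1042a6);
        assert(KVM_INST_MTSPR(SPRN_SRR0)  == 0x7c1a03a6);
        printf("encodings match\n");
        return 0;
    }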

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>

diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 06b15ee..04d4b5a 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -49,23 +49,17 @@
 #define KVM_RT_30		0x03c00000
 #define KVM_MASK_RB		0x0000f800
 #define KVM_INST_MFMSR		0x7c0000a6
-#define KVM_INST_MFSPR_SPRG0	0x7c1042a6
-#define KVM_INST_MFSPR_SPRG1	0x7c1142a6
-#define KVM_INST_MFSPR_SPRG2	0x7c1242a6
-#define KVM_INST_MFSPR_SPRG3	0x7c1342a6
-#define KVM_INST_MFSPR_SRR0	0x7c1a02a6
-#define KVM_INST_MFSPR_SRR1	0x7c1b02a6
-#define KVM_INST_MFSPR_DAR	0x7c1302a6
-#define KVM_INST_MFSPR_DSISR	0x7c1202a6
 
-#define KVM_INST_MTSPR_SPRG0	0x7c1043a6
-#define KVM_INST_MTSPR_SPRG1	0x7c1143a6
-#define KVM_INST_MTSPR_SPRG2	0x7c1243a6
-#define KVM_INST_MTSPR_SPRG3	0x7c1343a6
-#define KVM_INST_MTSPR_SRR0	0x7c1a03a6
-#define KVM_INST_MTSPR_SRR1	0x7c1b03a6
-#define KVM_INST_MTSPR_DAR	0x7c1303a6
-#define KVM_INST_MTSPR_DSISR	0x7c1203a6
+#define SPR_FROM		0
+#define SPR_TO			0x100
+
+#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
+				    (((sprn) & 0x1f) << 16) | \
+				    (((sprn) & 0x3e0) << 6) | \
+				    (moveto))
+
+#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
+#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)
 
 #define KVM_INST_TLBSYNC	0x7c00046c
 #define KVM_INST_MTMSRD_L0	0x7c000164
@@ -440,56 +434,191 @@
 	case KVM_INST_MFMSR:
 		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG0:
+	case KVM_INST_MFSPR(SPRN_SPRG0):
 		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG1:
+	case KVM_INST_MFSPR(SPRN_SPRG1):
 		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG2:
+	case KVM_INST_MFSPR(SPRN_SPRG2):
 		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG3:
+	case KVM_INST_MFSPR(SPRN_SPRG3):
 		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SRR0:
+	case KVM_INST_MFSPR(SPRN_SRR0):
 		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SRR1:
+	case KVM_INST_MFSPR(SPRN_SRR1):
 		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
 		break;
-	case KVM_INST_MFSPR_DAR:
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_DEAR):
+#else
+	case KVM_INST_MFSPR(SPRN_DAR):
+#endif
 		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
 		break;
-	case KVM_INST_MFSPR_DSISR:
+	case KVM_INST_MFSPR(SPRN_DSISR):
 		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
 		break;
 
+#ifdef CONFIG_PPC_BOOK3E_MMU
+	case KVM_INST_MFSPR(SPRN_MAS0):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS1):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS2):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS3):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS4):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS6):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS7):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
+		break;
+#endif /* CONFIG_PPC_BOOK3E_MMU */
+
+	case KVM_INST_MFSPR(SPRN_SPRG4):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_SPRG4R):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_SPRG5):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_SPRG5R):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_SPRG6):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_SPRG6R):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_SPRG7):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_SPRG7R):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
+		break;
+
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_ESR):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
+		break;
+#endif
+
+	case KVM_INST_MFSPR(SPRN_PIR):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
+		break;
+
+
 	/* Stores */
-	case KVM_INST_MTSPR_SPRG0:
+	case KVM_INST_MTSPR(SPRN_SPRG0):
 		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SPRG1:
+	case KVM_INST_MTSPR(SPRN_SPRG1):
 		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SPRG2:
+	case KVM_INST_MTSPR(SPRN_SPRG2):
 		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SPRG3:
+	case KVM_INST_MTSPR(SPRN_SPRG3):
 		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SRR0:
+	case KVM_INST_MTSPR(SPRN_SRR0):
 		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SRR1:
+	case KVM_INST_MTSPR(SPRN_SRR1):
 		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
 		break;
-	case KVM_INST_MTSPR_DAR:
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MTSPR(SPRN_DEAR):
+#else
+	case KVM_INST_MTSPR(SPRN_DAR):
+#endif
 		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
 		break;
-	case KVM_INST_MTSPR_DSISR:
+	case KVM_INST_MTSPR(SPRN_DSISR):
 		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
 		break;
+#ifdef CONFIG_PPC_BOOK3E_MMU
+	case KVM_INST_MTSPR(SPRN_MAS0):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS1):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS2):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS3):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS4):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS6):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS7):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
+		break;
+#endif /* CONFIG_PPC_BOOK3E_MMU */
+
+	case KVM_INST_MTSPR(SPRN_SPRG4):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_SPRG5):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_SPRG6):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_SPRG7):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
+		break;
+
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MTSPR(SPRN_ESR):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
+		break;
+#endif
 
 	/* Nops */
 	case KVM_INST_TLBSYNC:
@@ -556,9 +685,18 @@
 	start = (void*)_stext;
 	end = (void*)_etext;
 
+	/*
+	 * Being interrupted in the middle of patching would
+	 * be bad for SPRG4-7, which KVM can't keep in sync
+	 * with emulated accesses because reads don't trap.
+	 */
+	local_irq_disable();
+
 	for (p = start; p < end; p++)
 		kvm_check_ins(p, features);
 
+	local_irq_enable();
+
 	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
 			 kvm_patching_worked ? "worked" : "failed");
 }