[SPARC64]: Create a separate kernel TSB for 4MB/256MB mappings.

It can map all of the linear kernel mappings with zero TSB hash
conflicts for systems with 16GB or less RAM.  In such cases, on
SUN4V, once we load up this TSB the first time with all the
mappings, we never take a linear kernel mapping TLB miss again;
the hypervisor handles them all.
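
For illustration, here is a minimal C sketch of the direct-mapped TSB
scheme this patch relies on.  The names and sizes (kern_tsb4m,
tsb4m_slot, TSB4M_ENTRIES) are illustrative assumptions, not the
kernel's actual symbols: a virtual address hashes to exactly one slot,
the stored tag is compared against the tag target, and on a miss the
slow path writes the computed entry back so the next access hits.

	/*
	 * Conceptual sketch only -- names and sizes are illustrative,
	 * not the kernel's actual symbols.
	 */
	#include <stdint.h>

	#define TSB4M_ENTRIES	512	/* direct-mapped, power of two */
	#define SHIFT_4MB	22	/* 4MB page shift */

	struct tsb_entry {
		uint64_t tag;		/* tag target derived from the VA */
		uint64_t tte;		/* translation table entry */
	};

	static struct tsb_entry kern_tsb4m[TSB4M_ENTRIES];

	/* Hash a virtual address to its TSB slot (one slot per 4MB page). */
	static inline struct tsb_entry *tsb4m_slot(uint64_t vaddr)
	{
		return &kern_tsb4m[(vaddr >> SHIFT_4MB) & (TSB4M_ENTRIES - 1)];
	}

	/* Fast path: return the cached TTE on a tag match, 0 on a miss. */
	static inline uint64_t tsb4m_lookup(uint64_t vaddr, uint64_t tag_target)
	{
		struct tsb_entry *ent = tsb4m_slot(vaddr);

		return ent->tag == tag_target ? ent->tte : 0;
	}

	/* Miss path: store the computed TTE so the next access hits. */
	static inline void tsb4m_load(uint64_t vaddr, uint64_t tag_target,
				      uint64_t tte)
	{
		struct tsb_entry *ent = tsb4m_slot(vaddr);

		ent->tag = tag_target;
		ent->tte = tte;
	}

With 4MB pages and a TSB sized for the machine's RAM, every linear
kernel page gets its own slot, which is why the hash-conflict-free
claim above holds for 16GB or less of memory.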

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index ae1dac1..efcf38b 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -121,6 +121,12 @@
 	 nop
 
 	.align		32
+kvmap_dtlb_tsb4m_load:
+	KTSB_LOCK_TAG(%g1, %g2, %g7)
+	KTSB_WRITE(%g1, %g5, %g6)
+	ba,pt		%xcc, kvmap_dtlb_load
+	 nop
+
 kvmap_dtlb:
 	/* %g6: TAG TARGET */
 	mov		TLB_TAG_ACCESS, %g4
@@ -133,6 +139,13 @@
 	brgez,pn	%g4, kvmap_dtlb_nonlinear
 	 nop
 
+	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
+	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
+
+	/* TSB entry address left in %g1, lookup linear PTE.
+	 * Must preserve %g1 and %g6 (TAG).
+	 */
+kvmap_dtlb_tsb4m_miss:
 	sethi		%hi(kpte_linear_bitmap), %g2
 	or		%g2, %lo(kpte_linear_bitmap), %g2
 
@@ -163,7 +176,7 @@
 
 	.globl		kvmap_linear_patch
 kvmap_linear_patch:
-	ba,pt		%xcc, kvmap_dtlb_load
+	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
 	 xor		%g2, %g4, %g5
 
 kvmap_dtlb_vmalloc_addr: