[SPARC64]: Add infrastructure for dynamic TSB sizing.

This also cleans up tsb_context_switch().  The assembler
routine is now __tsb_context_switch() and the former is
an inline function that picks out the bits from the mm_struct
and passes them to the assembler code as arguments.

setup_tsb_params() computes the locked TLB entry used to map
the TSB.  Later, once we support the physical-address quad load
instructions of Cheetah+ and newer chips, we'll simply use the
physical address as the TSB register value and set both the map
virtual address and the map PTE to zero.
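
In rough C, that setup amounts to the sketch below.  This is
illustrative only, not the patch's code: tsb_size_to_reg_field(),
mk_locked_tte() and TSBMAP_BASE are hypothetical placeholders for
the real bit manipulation, and the Cheetah+ branch shows the
future direction described above.

	static void setup_tsb_params_sketch(mm_context_t *ctx,
					    unsigned long tsb_bytes)
	{
		unsigned long tsb_paddr = __pa(ctx->tsb);

		if (tlb_type == cheetah_plus) {
			/* Future: with physical quad loads the TSB
			 * register can hold the physical address
			 * directly, so no locked mapping is needed.
			 */
			ctx->tsb_reg_val = tsb_paddr |
				tsb_size_to_reg_field(tsb_bytes);
			ctx->tsb_map_vaddr = 0UL;
			ctx->tsb_map_pte = 0UL;
		} else {
			/* Map the TSB with a locked TLB entry and
			 * point the TSB register at that virtual
			 * address.
			 */
			ctx->tsb_map_vaddr = TSBMAP_BASE;
			ctx->tsb_map_pte = mk_locked_tte(tsb_paddr,
							 tsb_bytes);
			ctx->tsb_reg_val = ctx->tsb_map_vaddr |
				tsb_size_to_reg_field(tsb_bytes);
		}
	}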

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 36384cf..2effeba 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -90,9 +90,20 @@
 
 #ifndef __ASSEMBLY__
 
+#define TSB_ENTRY_ALIGNMENT	16
+
+struct tsb {
+	unsigned long tag;
+	unsigned long pte;
+} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
+
 typedef struct {
 	unsigned long	sparc64_ctx_val;
-	unsigned long	*sparc64_tsb;
+	struct tsb	*tsb;
+	unsigned long	tsb_nentries;
+	unsigned long	tsb_reg_val;
+	unsigned long	tsb_map_vaddr;
+	unsigned long	tsb_map_pte;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 0dffb4c..0a950f1 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -22,7 +22,15 @@
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 
-extern unsigned long tsb_context_switch(unsigned long pgd_pa, unsigned long *tsb);
+extern void __tsb_context_switch(unsigned long pgd_pa, unsigned long tsb_reg,
+				 unsigned long tsb_vaddr, unsigned long tsb_pte);
+
+static inline void tsb_context_switch(struct mm_struct *mm)
+{
+	__tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val,
+			     mm->context.tsb_map_vaddr,
+			     mm->context.tsb_map_pte);
+}
 
 /* Set MMU context in the actual hardware. */
 #define load_secondary_context(__mm) \
@@ -52,8 +60,7 @@
 
 	if (!ctx_valid || (old_mm != mm)) {
 		load_secondary_context(mm);
-		tsb_context_switch(__pa(mm->pgd),
-				   mm->context.sparc64_tsb);
+		tsb_context_switch(mm);
 	}
 
 	/* Even if (mm == old_mm) we _must_ check
@@ -91,7 +98,7 @@
 
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-	tsb_context_switch(__pa(mm->pgd), mm->context.sparc64_tsb);
+	tsb_context_switch(mm);
 }
 
 #endif /* !(__ASSEMBLY__) */
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index 03d272e..1f93b7d 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -19,7 +19,7 @@
  * 	stxa		%g5, [%g0] ASI_{D,I}TLB_DATA_IN
  * 	retry
  *
-
+ *
  * Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte
  * PTE.  The TAG is of the same layout as the TLB TAG TARGET mmu
  * register which is:
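
For reference, the probe that the tsb.h comment above describes
reduces to the following C sketch.  It is not part of the patch;
the tag layout (context in bits 60:48, VA[63:22] in bits 41:0)
follows the UltraSPARC TAG TARGET register, and nentries is
assumed to be a power of two.

	#include <stdint.h>
	#include <stddef.h>

	/* One 16-byte TSB slot: 8-byte tag, then 8-byte PTE. */
	struct tsb {
		uint64_t tag;
		uint64_t pte;
	};

	/* Software model of the assembler probe: build the tag the
	 * way TAG TARGET does, index by the 8K virtual page number,
	 * and compare tags to decide hit vs. miss.
	 */
	static struct tsb *tsb_probe(struct tsb *tsb,
				     unsigned long nentries,
				     uint64_t ctx, uint64_t vaddr)
	{
		uint64_t tag = (ctx << 48) | (vaddr >> 22);
		unsigned long idx = (vaddr >> 13) & (nentries - 1);
		struct tsb *ent = &tsb[idx];

		return (ent->tag == tag) ? ent : NULL;
	}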