sh64: Fix up the build for the thread_xstate changes.

This updates the sh64 FPU, ptrace, signal, and unaligned access handling code
in line with the earlier sh32 changes, in order to tie in to the generic
task_xstate management code.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
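
The conversion below is mechanical: every thread.fpu.hard access becomes
thread.xstate->hardfpu, so the FPU register state is no longer embedded in
thread_struct but hangs off a dynamically allocated xstate pointer managed by
the generic task_xstate code. As a rough sketch (illustrative only, not part
of this patch; the real definitions live in the sh headers, and the softfpu
member is the sh32 soft-float counterpart assumed here for completeness):

  union thread_xstate {
  	struct sh_fpu_hard_struct hardfpu;	/* hardware FPU register file */
  	struct sh_fpu_soft_struct softfpu;	/* soft-float emulation state */
  };

  struct thread_struct {
  	/* ... other members unchanged ... */
  	union thread_xstate *xstate;		/* allocated per task by the
  						 * generic task_xstate code */
  };

With that layout, the static init_fpuregs union in sh5/fpu.c and all of the
per-task accesses in the hunks below simply switch member names and add one
level of indirection.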
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
index 4648cce..92df285 100644
--- a/arch/sh/kernel/cpu/sh5/fpu.c
+++ b/arch/sh/kernel/cpu/sh5/fpu.c
@@ -27,8 +27,8 @@
 #define sNAN64		0xFFFFFFFFFFFFFFFFULL
 #define sNAN32		0xFFFFFFFFUL
 
-static union sh_fpu_union init_fpuregs = {
-	.hard = {
+static union thread_xstate init_fpuregs = {
+	.hardfpu = {
 		.fp_regs = { [0 ... 63] = sNAN32 },
 		.fpscr = FPSCR_INIT
 	}
@@ -72,7 +72,7 @@
 		     "fgetscr   fr63\n\t"
 		     "fst.s     %0, (32*8), fr63\n\t"
 		: /* no output */
-		: "r" (&tsk->thread.fpu.hard)
+		: "r" (&tsk->thread.xstate->hardfpu)
 		: "memory");
 }
 
@@ -121,7 +121,7 @@
 
 void fpinit(struct sh_fpu_hard_struct *fpregs)
 {
-	*fpregs = init_fpuregs.hard;
+	*fpregs = init_fpuregs.hardfpu;
 }
 
 asmlinkage void
@@ -157,10 +157,10 @@
 
         last_task_used_math = current;
         if (used_math()) {
-                fpload(&current->thread.fpu.hard);
+                fpload(&current->thread.xstate->hardfpu);
         } else {
 		/* First time FPU user.  */
-		fpload(&init_fpuregs.hard);
+		fpload(&init_fpuregs.hardfpu);
                 set_used_math();
         }
 	disable_fpu();
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 31f80c6..c9554a7 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -410,7 +410,7 @@
 			regs->sr |= SR_FD;
 		}
 
-		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
+		memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu));
 	}
 
 	return fpvalid;
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 873ebdc..67fbcee 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -88,7 +88,7 @@
 		regs->sr |= SR_FD;
 	}
 
-	tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)];
+	tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)];
 	return tmp;
 }
 
@@ -114,7 +114,7 @@
 	regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
 
 	if (!tsk_used_math(task)) {
-		fpinit(&task->thread.fpu.hard);
+		fpinit(&task->thread.xstate->hardfpu);
 		set_stopped_child_used_math(task);
 	} else if (last_task_used_math == task) {
 		enable_fpu();
@@ -124,7 +124,7 @@
 		regs->sr |= SR_FD;
 	}
 
-	((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data;
+	((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data;
 	return 0;
 }
 
@@ -222,7 +222,7 @@
 		return ret;
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &target->thread.fpu.hard, 0, -1);
+				   &target->thread.xstate->hardfpu, 0, -1);
 }
 
 static int fpregs_set(struct task_struct *target,
@@ -239,7 +239,7 @@
 	set_stopped_child_used_math(target);
 
 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				  &target->thread.fpu.hard, 0, -1);
+				  &target->thread.xstate->hardfpu, 0, -1);
 }
 
 static int fpregs_active(struct task_struct *target,
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index ce76dbd..4733bfc 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -295,7 +295,7 @@
 		regs->sr |= SR_FD;
 	}
 
-	err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
+	err |= __copy_from_user(&current->thread.xstate->hardfpu, &sc->sc_fpregs[0],
 				(sizeof(long long) * 32) + (sizeof(int) * 1));
 
 	return err;
@@ -320,7 +320,7 @@
 		regs->sr |= SR_FD;
 	}
 
-	err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
+	err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.xstate->hardfpu,
 			      (sizeof(long long) * 32) + (sizeof(int) * 1));
 	clear_used_math();
 
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index d86f531..e3f92eb 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -611,19 +611,19 @@
 
 		switch (width_shift) {
 		case 2:
-			current->thread.fpu.hard.fp_regs[destreg] = buflo;
+			current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
 			break;
 		case 3:
 			if (do_paired_load) {
-				current->thread.fpu.hard.fp_regs[destreg] = buflo;
-				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
+				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
 			} else {
 #if defined(CONFIG_CPU_LITTLE_ENDIAN)
-				current->thread.fpu.hard.fp_regs[destreg] = bufhi;
-				current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
+				current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
+				current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
 #else
-				current->thread.fpu.hard.fp_regs[destreg] = buflo;
-				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
+				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
 #endif
 			}
 			break;
@@ -681,19 +681,19 @@
 
 		switch (width_shift) {
 		case 2:
-			buflo = current->thread.fpu.hard.fp_regs[srcreg];
+			buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
 			break;
 		case 3:
 			if (do_paired_load) {
-				buflo = current->thread.fpu.hard.fp_regs[srcreg];
-				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
+				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
 			} else {
 #if defined(CONFIG_CPU_LITTLE_ENDIAN)
-				bufhi = current->thread.fpu.hard.fp_regs[srcreg];
-				buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
+				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
+				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
 #else
-				buflo = current->thread.fpu.hard.fp_regs[srcreg];
-				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
+				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
 #endif
 			}
 			break;