sh: Updates for IRQ handler changes.

Trivial fixes for build breakage introduced by the IRQ handler changes.
Interrupt handlers lose their struct pt_regs argument and pick the
interrupted register state up through get_irq_regs()/set_irq_regs()
where it is still needed, and __do_IRQ() is replaced by
generic_handle_irq().  The /proc/interrupts output is also updated for
per-CPU counts, the irq_chip ->name field and the flow handler name,
and irq_err_count is now defined and reported.
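
For reference, the handler conversion being caught up with follows the
pattern sketched below.  foo_interrupt() and foo_user_ticks are made-up
names used only to illustrate the new prototype; they are not code
touched by this patch.

	#include <linux/interrupt.h>
	#include <asm/irq_regs.h>
	#include <asm/ptrace.h>

	/* Hypothetical counter, for illustration only. */
	static unsigned int foo_user_ticks;

	/* Previously: foo_interrupt(int irq, void *dev_id, struct pt_regs *regs) */
	static irqreturn_t foo_interrupt(int irq, void *dev_id)
	{
		/* The registers are no longer passed in; fetch them on demand. */
		struct pt_regs *regs = get_irq_regs();

		if (user_mode(regs))
			foo_user_ticks++;

		return IRQ_HANDLED;
	}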

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index c7ebd6a..3b93682 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -17,6 +17,8 @@
 #include <asm/thread_info.h>
 #include <asm/cpu/mmu_context.h>
 
+atomic_t irq_err_count;
+
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * each architecture has to answer this themselves, it doesn't deserve
@@ -47,8 +49,10 @@
 		if (!action)
 			goto unlock;
 		seq_printf(p, "%3d: ",i);
-		seq_printf(p, "%10u ", kstat_irqs(i));
-		seq_printf(p, " %14s", irq_desc[i].chip->typename);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		seq_printf(p, " %14s", irq_desc[i].chip->name);
+		seq_printf(p, "-%s", handle_irq_name(irq_desc[i].handle_irq));
 		seq_printf(p, "  %s", action->name);
 
 		for (action=action->next; action; action = action->next)
@@ -56,7 +60,9 @@
 		seq_putc(p, '\n');
 unlock:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	}
+	} else if (i == NR_IRQS)
+		seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
+
 	return 0;
 }
 #endif
@@ -78,6 +84,7 @@
 		      unsigned long r6, unsigned long r7,
 		      struct pt_regs regs)
 {
+	struct pt_regs *old_regs = set_irq_regs(&regs);
 	int irq = r4;
 #ifdef CONFIG_4KSTACKS
 	union irq_ctx *curctx, *irqctx;
@@ -139,7 +146,6 @@
 
 		__asm__ __volatile__ (
 			"mov	%0, r4		\n"
-			"mov	%1, r5		\n"
 			"mov	r15, r9		\n"
 			"jsr	@%2		\n"
 			/* swith to the irq stack */
@@ -147,17 +153,18 @@
 			/* restore the stack (ring zero) */
 			"mov	r9, r15		\n"
 			: /* no outputs */
-			: "r" (irq), "r" (&regs), "r" (__do_IRQ), "r" (isp)
+			: "r" (irq), "r" (generic_handle_irq), "r" (isp)
 			/* XXX: A somewhat excessive clobber list? -PFM */
 			: "memory", "r0", "r1", "r2", "r3", "r4",
 			  "r5", "r6", "r7", "r8", "t", "pr"
 		);
 	} else
 #endif
-		__do_IRQ(irq, &regs);
+		generic_handle_irq(irq);
 
 	irq_exit();
 
+	set_irq_regs(old_regs);
 	return 1;
 }
 
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index 1fbb83c..57e708d 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -109,13 +109,14 @@
  * handle_timer_tick() needs to keep up the real-time clock,
  * as well as call the "do_timer()" routine every clocktick
  */
-void handle_timer_tick(struct pt_regs *regs)
+void handle_timer_tick(void)
 {
 	do_timer(1);
 #ifndef CONFIG_SMP
-	update_process_times(user_mode(regs));
+	update_process_times(user_mode(get_irq_regs()));
 #endif
-	profile_tick(CPU_PROFILING, regs);
+	if (current->pid)
+		profile_tick(CPU_PROFILING);
 
 #ifdef CONFIG_HEARTBEAT
 	if (sh_mv.mv_heartbeat != NULL)
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index badfedb..2492701 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -80,8 +80,7 @@
 	return count;
 }
 
-static irqreturn_t tmu_timer_interrupt(int irq, void *dev_id,
-				       struct pt_regs *regs)
+static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
 {
 	unsigned long timer_status;
 
@@ -98,7 +97,7 @@
 	 * locally disabled. -arca
 	 */
 	write_seqlock(&xtime_lock);
-	handle_timer_tick(regs);
+	handle_timer_tick();
 	write_sequnlock(&xtime_lock);
 
 	return IRQ_HANDLED;