[PATCH] x86_64: Switch to the interrupt stack when running a softirq in local_bh_enable()

This avoids some potential stack overflows with very deep softirq callchains.
i386 does this too.
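
For reference, the intended flow is roughly as follows (a sketch of the
control flow only; this patch adds do_softirq() and call_softirq(), while
the generic local_bh_enable() path stays as it is in kernel/softirq.c):

	local_bh_enable()
	  -> do_softirq()         /* arch override, arch/x86_64/kernel/irq.c */
	       -> call_softirq()  /* entry.S stub: point %rsp at the per-CPU
	                             interrupt stack (%gs:pda_irqstackptr) */
	            -> __do_softirq()  /* run the pending softirq handlers */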

TODO: add CFI annotations for call_softirq.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 0696e02..096d470 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -918,3 +918,15 @@
 ENTRY(call_debug)
        zeroentry do_call_debug
 
+ENTRY(call_softirq)
+	movq %gs:pda_irqstackptr,%rax	# per-CPU interrupt stack top
+	pushq %r15			# save scratch register
+	movq %rsp,%r15			# remember the current stack pointer
+	incl %gs:pda_irqcount		# ZF set when not already on the irq stack
+	cmove %rax,%rsp			# ... and only then switch stacks
+	call __do_softirq
+	movq %r15,%rsp			# back to the original stack
+	decl %gs:pda_irqcount
+	popq %r15
+	ret
+
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index cc3fb85..849a20a 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -135,3 +135,22 @@
 	local_irq_disable();
 }
 #endif
+
+extern void call_softirq(void);
+
+asmlinkage void do_softirq(void)
+{
+	__u32 pending;
+	unsigned long flags;
+
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+	pending = local_softirq_pending();
+	/* Switch to interrupt stack */
+	if (pending)
+		call_softirq();
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(do_softirq);