s390/percpu: implement this_cpu_xchg()

The generic variant uses a local_irq_save/restore pair, which is quite
expensive. On s390 it is sufficient to disable preemption, which is a
no-op with !CONFIG_PREEMPT, and then use the regular xchg macro.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 6537e72..7adebfc 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -84,6 +84,24 @@
 #define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
 #define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
 
+#define arch_this_cpu_xchg(pcp, nval)					\
+({									\
+	typeof(pcp) *ptr__;						\
+	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	ptr__ = __this_cpu_ptr(&(pcp));					\
+	ret__ = xchg(ptr__, nval);					\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
+#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
+#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
+#ifdef CONFIG_64BIT
+#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
+#endif
+
 #include <asm-generic/percpu.h>
 
 #endif /* __ARCH_S390_PERCPU__ */
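
A hypothetical usage sketch follows; the per-cpu variable and function
names are made up for illustration and are not part of this patch. On
s390, this_cpu_xchg() now expands to arch_this_cpu_xchg(), i.e.
preempt_disable()/xchg()/preempt_enable(), instead of the irq-disabling
generic fallback:

#include <linux/percpu.h>

/* Hypothetical per-cpu counter. */
static DEFINE_PER_CPU(unsigned long, demo_counter);

/* Atomically (with respect to this CPU) fetch the old value and reset
 * the counter to zero.
 */
static unsigned long demo_fetch_and_clear(void)
{
	return this_cpu_xchg(demo_counter, 0);
}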