[S390] Inline assembly cleanup.

Major cleanup of all s390 inline assemblies. They now have a common
coding style. Quite a few have been shortened, mainly by using register
asm variables. Use of the EX_TABLE macro helps as well. The atomic ops,
bit ops and locking inlines now use the Q-constraint if a newer gcc
is used, which results in slightly better code.
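
For reference only (this snippet is not part of the patch): a register
asm variable binds a C value to a fixed register, so the asm body no
longer needs explicit register loads. The helper name, diag function
code and register choice below are placeholders for illustration.

/*
 * Sketch of the register asm pattern; diag code 0x210 and the use of
 * %r2 are illustrative only.
 */
static inline int sketch_diag(unsigned long addr)
{
	register unsigned long reg2 asm("2") = addr;	/* keep addr in %r2 */
	int rc;

	asm volatile(
		"	diag	%1,0,0x210\n"
		"	ipm	%0\n"		/* condition code -> rc */
		"	srl	%0,28\n"
		: "=d" (rc) : "d" (reg2) : "cc");
	return rc;
}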

Thanks to Christian Borntraeger for proofreading the changes.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 9b11e3e..226275d 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -142,17 +142,17 @@
 
 	rx = (unsigned long) parameter;
 	ry = (unsigned long) func;
-	__asm__ __volatile__(
+	asm volatile(
 #ifdef CONFIG_64BIT
-		"   sam31\n" // switch to 31 bit
-		"   diag    %0,%1,0x64\n"
-		"   sam64\n" // switch back to 64 bit
+		"	sam31\n"
+		"	diag	%0,%1,0x64\n"
+		"	sam64\n"
 #else
-		"   diag    %0,%1,0x64\n"
+		"	diag	%0,%1,0x64\n"
 #endif
-		"   ipm     %2\n"
-		"   srl     %2,28\n"
-		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
+		"	ipm	%2\n"
+		"	srl	%2,28\n"
+		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
 	*ret1 = rx;
 	*ret2 = ry;
 	return rc;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a393c30..f2b9a84 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -424,20 +424,13 @@
 
 	if (pfault_disable)
 		return -1;
-        __asm__ __volatile__(
-                "    diag  %1,%0,0x258\n"
-		"0:  j     2f\n"
-		"1:  la    %0,8\n"
+	asm volatile(
+		"	diag	%1,%0,0x258\n"
+		"0:	j	2f\n"
+		"1:	la	%0,8\n"
 		"2:\n"
-		".section __ex_table,\"a\"\n"
-		"   .align 4\n"
-#ifndef CONFIG_64BIT
-		"   .long  0b,1b\n"
-#else /* CONFIG_64BIT */
-		"   .quad  0b,1b\n"
-#endif /* CONFIG_64BIT */
-		".previous"
-                : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc" );
+		EX_TABLE(0b,1b)
+		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
         __ctl_set_bit(0, 9);
         return rc;
 }
@@ -450,18 +443,11 @@
 	if (pfault_disable)
 		return;
 	__ctl_clear_bit(0,9);
-        __asm__ __volatile__(
-                "    diag  %0,0,0x258\n"
+	asm volatile(
+		"	diag	%0,0,0x258\n"
 		"0:\n"
-		".section __ex_table,\"a\"\n"
-		"   .align 4\n"
-#ifndef CONFIG_64BIT
-		"   .long  0b,0b\n"
-#else /* CONFIG_64BIT */
-		"   .quad  0b,0b\n"
-#endif /* CONFIG_64BIT */
-		".previous"
-		: : "a" (&refbk), "m" (refbk) : "cc" );
+		EX_TABLE(0b,0b)
+		: : "a" (&refbk), "m" (refbk) : "cc");
 }
 
 asmlinkage void
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index cfd9b8f..127044e 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -45,26 +45,17 @@
 {
         if (addr >= 0x7ff00000)
                 return;
+	asm volatile(
 #ifdef CONFIG_64BIT
-        asm volatile (
-		"   sam31\n"
-		"   diag %0,%0,0x10\n"
-		"0: sam64\n"
-		".section __ex_table,\"a\"\n"
-		"   .align 8\n"
-		"   .quad 0b, 0b\n"
-		".previous\n"
-		: : "a" (addr));
+		"	sam31\n"
+		"	diag	%0,%0,0x10\n"
+		"0:	sam64\n"
 #else
-        asm volatile (
-		"   diag %0,%0,0x10\n"
+		"	diag	%0,%0,0x10\n"
 		"0:\n"
-		".section __ex_table,\"a\"\n"
-		"   .align 4\n"
-		"   .long 0b, 0b\n"
-		".previous\n"
-		: : "a" (addr));
 #endif
+		EX_TABLE(0b,0b)
+		: : "a" (addr));
 }
 
 void show_mem(void)
@@ -156,11 +147,10 @@
 	S390_lowcore.kernel_asce = pgdir_k;
 
         /* enable virtual mapping in kernel mode */
-        __asm__ __volatile__("    LCTL  1,1,%0\n"
-                             "    LCTL  7,7,%0\n"
-                             "    LCTL  13,13,%0\n"
-                             "    SSM   %1" 
-			     : : "m" (pgdir_k), "m" (ssm_mask));
+	__ctl_load(pgdir_k, 1, 1);
+	__ctl_load(pgdir_k, 7, 7);
+	__ctl_load(pgdir_k, 13, 13);
+	__raw_local_irq_ssm(ssm_mask);
 
         local_flush_tlb();
         return;
@@ -241,11 +231,10 @@
 	S390_lowcore.kernel_asce = pgdir_k;
 
         /* enable virtual mapping in kernel mode */
-        __asm__ __volatile__("lctlg 1,1,%0\n\t"
-                             "lctlg 7,7,%0\n\t"
-                             "lctlg 13,13,%0\n\t"
-                             "ssm   %1"
-			     : :"m" (pgdir_k), "m" (ssm_mask));
+	__ctl_load(pgdir_k, 1, 1);
+	__ctl_load(pgdir_k, 7, 7);
+	__ctl_load(pgdir_k, 13, 13);
+	__raw_local_irq_ssm(ssm_mask);
 
         local_flush_tlb();