MIPS: Remove further use of .subsection

7837314d141c661c70bc13c5050694413ecfe14a [MIPS: Get rid of branches to
.subsections] removed most uses of .subsection in inline assembler code.

It left the instances in spinlock.h alone because we knew their use was
in fairly small files where .subsection use was fine, but of course this
was a fragile assumption.  LTO breaks this assumption, resulting in build
errors due to exceeded branch range, so remove further instances of
.subsection.

The two functions that still use .macro don't currently cause issues;
however, this use is still fragile.
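
For reference, every conversion below follows the same pattern: the
out-of-line retry path that used to live in ".subsection 2" is dropped
and a failed sc is instead retried from C, so no branch in the inline
assembler has to reach an out-of-line slow path.  A minimal sketch of
that pattern (illustrative only, not part of this patch; the helper name
is made up and only standard GNU inline assembly for MIPS is assumed):

	static inline void example_atomic_inc(volatile unsigned int *p)
	{
		unsigned int tmp;

		do {
			__asm__ __volatile__(
			"	ll	%1, %2	# load linked		\n"
			"	addu	%1, 1	# new value		\n"
			"	sc	%1, %0	# store conditional	\n"
			: "=m" (*p), "=&r" (tmp)
			: "m" (*p)
			: "memory");
		} while (!tmp);	/* sc failed, lost the ll/sc race: retry */
	}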

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 5130c88..0b1dbd2 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -242,25 +242,16 @@
 		: "m" (rw->lock)
 		: "memory");
 	} else {
-		__asm__ __volatile__(
-		"	.set	noreorder	# arch_read_lock	\n"
-		"1:	ll	%1, %2					\n"
-		"	bltz	%1, 3f					\n"
-		"	 addu	%1, 1					\n"
-		"2:	sc	%1, %0					\n"
-		"	beqz	%1, 1b					\n"
-		"	 nop						\n"
-		"	.subsection 2					\n"
-		"3:	ll	%1, %2					\n"
-		"	bltz	%1, 3b					\n"
-		"	 addu	%1, 1					\n"
-		"	b	2b					\n"
-		"	 nop						\n"
-		"	.previous					\n"
-		"	.set	reorder					\n"
-		: "=m" (rw->lock), "=&r" (tmp)
-		: "m" (rw->lock)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"1:	ll	%1, %2	# arch_read_lock	\n"
+			"	bltz	%1, 1b				\n"
+			"	 addu	%1, 1				\n"
+			"2:	sc	%1, %0				\n"
+			: "=m" (rw->lock), "=&r" (tmp)
+			: "m" (rw->lock)
+			: "memory");
+		} while (unlikely(!tmp));
 	}
 
 	smp_llsc_mb();
@@ -285,21 +276,15 @@
 		: "m" (rw->lock)
 		: "memory");
 	} else {
-		__asm__ __volatile__(
-		"	.set	noreorder	# arch_read_unlock	\n"
-		"1:	ll	%1, %2					\n"
-		"	sub	%1, 1					\n"
-		"	sc	%1, %0					\n"
-		"	beqz	%1, 2f					\n"
-		"	 nop						\n"
-		"	.subsection 2					\n"
-		"2:	b	1b					\n"
-		"	 nop						\n"
-		"	.previous					\n"
-		"	.set	reorder					\n"
-		: "=m" (rw->lock), "=&r" (tmp)
-		: "m" (rw->lock)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"1:	ll	%1, %2	# arch_read_unlock	\n"
+			"	sub	%1, 1				\n"
+			"	sc	%1, %0				\n"
+			: "=m" (rw->lock), "=&r" (tmp)
+			: "m" (rw->lock)
+			: "memory");
+		} while (unlikely(!tmp));
 	}
 }
 
@@ -321,25 +306,16 @@
 		: "m" (rw->lock)
 		: "memory");
 	} else {
-		__asm__ __volatile__(
-		"	.set	noreorder	# arch_write_lock	\n"
-		"1:	ll	%1, %2					\n"
-		"	bnez	%1, 3f					\n"
-		"	 lui	%1, 0x8000				\n"
-		"2:	sc	%1, %0					\n"
-		"	beqz	%1, 3f					\n"
-		"	 nop						\n"
-		"	.subsection 2					\n"
-		"3:	ll	%1, %2					\n"
-		"	bnez	%1, 3b					\n"
-		"	 lui	%1, 0x8000				\n"
-		"	b	2b					\n"
-		"	 nop						\n"
-		"	.previous					\n"
-		"	.set	reorder					\n"
-		: "=m" (rw->lock), "=&r" (tmp)
-		: "m" (rw->lock)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"1:	ll	%1, %2	# arch_write_lock	\n"
+			"	bnez	%1, 1b				\n"
+			"	 lui	%1, 0x8000			\n"
+			"2:	sc	%1, %0				\n"
+			: "=m" (rw->lock), "=&r" (tmp)
+			: "m" (rw->lock)
+			: "memory");
+		} while (unlikely(!tmp));
 	}
 
 	smp_llsc_mb();
@@ -424,25 +400,21 @@
 		: "m" (rw->lock)
 		: "memory");
 	} else {
-		__asm__ __volatile__(
-		"	.set	noreorder	# arch_write_trylock	\n"
-		"	li	%2, 0					\n"
-		"1:	ll	%1, %3					\n"
-		"	bnez	%1, 2f					\n"
-		"	lui	%1, 0x8000				\n"
-		"	sc	%1, %0					\n"
-		"	beqz	%1, 3f					\n"
-		"	 li	%2, 1					\n"
-		"2:							\n"
-		__WEAK_LLSC_MB
-		"	.subsection 2					\n"
-		"3:	b	1b					\n"
-		"	 li	%2, 0					\n"
-		"	.previous					\n"
-		"	.set	reorder					\n"
-		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
-		: "m" (rw->lock)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"	ll	%1, %3	# arch_write_trylock	\n"
+			"	li	%2, 0				\n"
+			"	bnez	%1, 2f				\n"
+			"	lui	%1, 0x8000			\n"
+			"	sc	%1, %0				\n"
+			"	li	%2, 1				\n"
+			"2:						\n"
+			: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
+			: "m" (rw->lock)
+			: "memory");
+		} while (unlikely(!tmp));
+
+		smp_llsc_mb();
 	}
 
 	return ret;