FRV: Implement atomic64_t

Implement atomic64_t and its ops for FRV.  Tested with the following patch:

	diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
	index 55e4fab..086d50d 100644
	--- a/arch/frv/kernel/setup.c
	+++ b/arch/frv/kernel/setup.c
	@@ -746,6 +746,52 @@ static void __init parse_cmdline_early(char *cmdline)

	 } /* end parse_cmdline_early() */

	+static atomic64_t xxx;
	+
	+static void test_atomic64(void)
	+{
	+	atomic64_set(&xxx, 0x12300000023LL);
	+
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x12300000023LL);
	+	mb();
	+	if (atomic64_inc_return(&xxx) != 0x12300000024LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x12300000024LL);
	+	mb();
	+	if (atomic64_sub_return(0x36900000050LL, &xxx) != -0x2460000002cLL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != -0x2460000002cLL);
	+	mb();
	+	if (atomic64_dec_return(&xxx) != -0x2460000002dLL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != -0x2460000002dLL);
	+	mb();
	+	if (atomic64_add_return(0x36800000001LL, &xxx) != 0x121ffffffd4LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x121ffffffd4LL);
	+	mb();
	+	if (atomic64_cmpxchg(&xxx, 0x123456789abcdefLL, 0x121ffffffd4LL) != 0x121ffffffd4LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x121ffffffd4LL);
	+	mb();
	+	if (atomic64_cmpxchg(&xxx, 0x121ffffffd4LL, 0x123456789abcdefLL) != 0x121ffffffd4LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x123456789abcdefLL);
	+	mb();
	+	if (atomic64_xchg(&xxx, 0xabcdef123456789LL) != 0x123456789abcdefLL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0xabcdef123456789LL);
	+	mb();
	+}
	+
	 /*****************************************************************************/
	 /*
	  *
	@@ -845,6 +891,8 @@ void __init setup_arch(char **cmdline_p)
	 //	asm volatile("movgs %0,timerd" :: "r"(10000000));
	 //	__set_HSR(0, __get_HSR(0) | HSR0_ETMD);

	+	test_atomic64();
	+
	 } /* end setup_arch() */

	 #if 0

Note that the test doesn't cover all of the trivial wrappers, but it does
cover all of the substantial implementations.
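
For reference, the untested trivial wrappers are expected to be little more
than C glue around the routines exercised above.  A sketch of their likely
shape (the real definitions live in arch/frv/include/asm/atomic.h and should
be checked there; they are not quoted in this patch):

	#define atomic64_add(i, v)	((void) atomic64_add_return((i), (v)))
	#define atomic64_sub(i, v)	((void) atomic64_sub_return((i), (v)))
	#define atomic64_inc(v)		((void) atomic64_inc_return((v)))
	#define atomic64_dec(v)		((void) atomic64_dec_return((v)))
	#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
	#define atomic64_inc_and_test(v)	(atomic64_inc_return((v)) == 0)
	#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)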

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/arch/frv/lib/atomic64-ops.S b/arch/frv/lib/atomic64-ops.S
new file mode 100644
index 0000000..b6194ee
--- /dev/null
+++ b/arch/frv/lib/atomic64-ops.S
@@ -0,0 +1,162 @@
+/* kernel atomic64 operations
+ *
+ * For an explanation of how atomic ops work in this arch, see:
+ *   Documentation/frv/atomic-ops.txt
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/spr-regs.h>
+
+	.text
+	.balign 4
+
+
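+###############################################################################
+#
+# A note on the retry pattern used by every routine below (loosely summarised
+# from Documentation/frv/atomic-ops.txt): ICC3.Z is set up front and CC3 is
+# raised by the LDD.P/ORCR pair, which issue together so that nothing can get
+# in between the load and CC3 being raised.  Any interrupt or exception taken
+# before the conditional store knocks CC3 back down again, nullifying the
+# CSTD.P and the CORCC; ICC3.Z is then left set, and the closing BEQ loops
+# back to retry the whole operation.
+#
+###############################################################################
+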
+###############################################################################
+#
+# long long atomic64_inc_return(atomic64_t *v)
+#
+###############################################################################
+	.globl		atomic64_inc_return
+	.type		atomic64_inc_return,@function
+atomic64_inc_return:
+	or.p		gr8,gr8,gr10			/* move v to GR10 */
+0:
+	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
+	ckeq		icc3,cc7
+	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
+	orcr		cc7,cc7,cc3			/* set CC3 to true */
+	addicc		gr9,#1,gr9,icc0
+	addxi		gr8,#0,gr8,icc0
+	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
+	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
+	beq		icc3,#0,0b
+	bralr
+
+	.size		atomic64_inc_return, .-atomic64_inc_return
+
+###############################################################################
+#
+# long long atomic64_dec_return(atomic64_t *v)
+#
+###############################################################################
+	.globl		atomic64_dec_return
+	.type		atomic64_dec_return,@function
+atomic64_dec_return:
+	or.p		gr8,gr8,gr10			/* move v to GR10 */
+0:
+	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
+	ckeq		icc3,cc7
+	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
+	orcr		cc7,cc7,cc3			/* set CC3 to true */
+	subicc		gr9,#1,gr9,icc0
+	subxi		gr8,#0,gr8,icc0
+	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
+	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
+	beq		icc3,#0,0b
+	bralr
+
+	.size		atomic64_dec_return, .-atomic64_dec_return
+
+###############################################################################
+#
+# long long atomic64_add_return(long long i, atomic64_t *v)
+#
+###############################################################################
+	.globl		atomic64_add_return
+	.type		atomic64_add_return,@function
+atomic64_add_return:
+	or.p		gr8,gr8,gr4			/* move i to GR4:GR5 */
+	or		gr9,gr9,gr5
+0:
+	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
+	ckeq		icc3,cc7
+	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
+	orcr		cc7,cc7,cc3			/* set CC3 to true */
+	addcc		gr9,gr5,gr9,icc0
+	addx		gr8,gr4,gr8,icc0
+	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
+	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
+	beq		icc3,#0,0b
+	bralr
+
+	.size		atomic64_add_return, .-atomic64_add_return
+
+###############################################################################
+#
+# long long atomic64_sub_return(long long i, atomic64_t *v)
+#
+###############################################################################
+	.globl		atomic64_sub_return
+	.type		atomic64_sub_return,@function
+atomic64_sub_return:
+	or.p		gr8,gr8,gr4			/* move i to GR4:GR5 */
+	or		gr9,gr9,gr5
+0:
+	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
+	ckeq		icc3,cc7
+	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
+	orcr		cc7,cc7,cc3			/* set CC3 to true */
+	subcc		gr9,gr5,gr9,icc0
+	subx		gr8,gr4,gr8,icc0
+	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
+	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
+	beq		icc3,#0,0b
+	bralr
+
+	.size		atomic64_sub_return, .-atomic64_sub_return
+
+###############################################################################
+#
+# uint64_t __xchg_64(uint64_t i, uint64_t *v)
+#
+###############################################################################
+	.globl		__xchg_64
+	.type		__xchg_64,@function
+__xchg_64:
+	or.p		gr8,gr8,gr4			/* move i to GR4:GR5 */
+	or		gr9,gr9,gr5
+0:
+	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
+	ckeq		icc3,cc7
+	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
+	orcr		cc7,cc7,cc3			/* set CC3 to true */
+	cstd.p		gr4,@(gr10,gr0)		,cc3,#1
+	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
+	beq		icc3,#0,0b
+	bralr
+
+	.size		__xchg_64, .-__xchg_64
+
+###############################################################################
+#
+# uint64_t __cmpxchg_64(uint64_t test, uint64_t new, uint64_t *v)
+#
+###############################################################################
+	.globl		__cmpxchg_64
+	.type		__cmpxchg_64,@function
+__cmpxchg_64:
+	or.p		gr8,gr8,gr4			/* move test to GR4:GR5 */
+	or		gr9,gr9,gr5
+0:
+	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
+	ckeq		icc3,cc7
+	ldd.p		@(gr12,gr0),gr8			/* LDD.P/ORCR must be atomic */
+	orcr		cc7,cc7,cc3			/* set CC3 to true */
+	subcc		gr8,gr4,gr0,icc0		/* compare high words */
+	subcc.p		gr9,gr5,gr0,icc1		/* compare low words */
+	bnelr		icc0,#0				/* mismatch - return the value read */
+	bnelr		icc1,#0
+	cstd.p		gr10,@(gr12,gr0)	,cc3,#1
+	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
+	beq		icc3,#0,0b
+	bralr
+
+	.size		__cmpxchg_64, .-__cmpxchg_64
+
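
The __xchg_64() and __cmpxchg_64() helpers above are hooked up to the public
atomic64 API by the header side of this patch.  Presumably the glue looks
something like the following (a sketch only; the exact names, argument order
and types should be checked against arch/frv/include/asm/atomic.h):

	extern uint64_t __xchg_64(uint64_t i, volatile void *v);
	extern uint64_t __cmpxchg_64(uint64_t test, uint64_t new,
				     volatile void *v);

	#define atomic64_xchg(v, new) \
		((long long) __xchg_64((uint64_t) (new), &(v)->counter))
	#define atomic64_cmpxchg(v, old, new) \
		((long long) __cmpxchg_64((uint64_t) (old), (uint64_t) (new), \
					  &(v)->counter))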