Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!

diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
new file mode 100644
index 0000000..1b383e3
--- /dev/null
+++ b/include/asm-alpha/atomic.h
@@ -0,0 +1,215 @@
+#ifndef _ALPHA_ATOMIC_H
+#define _ALPHA_ATOMIC_H
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc...
+ *
+ * But use these as seldom as possible since they are much slower
+ * than regular operations.
+ */
+
+
+/*
+ * Counter is volatile to make sure gcc doesn't try to be clever
+ * and move things around on us. We need to use _exactly_ the address
+ * the user gave us, not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+typedef struct { volatile long counter; } atomic64_t;
+
+#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
+#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )
+
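+/*
+ * atomic_read() adds 0 so the result is an rvalue that cannot be
+ * assigned to by mistake.  Neither atomic_read() nor atomic_set()
+ * implies any memory barrier.
+ */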
+#define atomic_read(v)		((v)->counter + 0)
+#define atomic64_read(v)	((v)->counter + 0)
+
+#define atomic_set(v,i)		((v)->counter = (i))
+#define atomic64_set(v,i)	((v)->counter = (i))
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+
+static __inline__ void atomic_add(int i, atomic_t * v)
+{
+	unsigned long temp;
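+	/*
+	 * ldl_l/stl_c is Alpha's load-locked/store-conditional pair.
+	 * stl_c leaves 0 in %0 when the store fails (someone else
+	 * touched the line in between), so the beq branches to the
+	 * out-of-line retry.  The loops below all follow this pattern.
+	 */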
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%1\n"
+	"	addl %0,%2,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter)
+	:"Ir" (i), "m" (v->counter));
+}
+
+static __inline__ void atomic64_add(long i, atomic64_t * v)
+{
+	unsigned long temp;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	addq %0,%2,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter)
+	:"Ir" (i), "m" (v->counter));
+}
+
+static __inline__ void atomic_sub(int i, atomic_t * v)
+{
+	unsigned long temp;
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%1\n"
+	"	subl %0,%2,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter)
+	:"Ir" (i), "m" (v->counter));
+}
+
+static __inline__ void atomic64_sub(long i, atomic64_t * v)
+{
+	unsigned long temp;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	subq %0,%2,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter)
+	:"Ir" (i), "m" (v->counter));
+}
+
+
+/*
+ * Same as above, but return the result value.  These variants also
+ * execute an "mb" on the success path, ordering the update before
+ * any later memory accesses from this CPU.
+ */
+static __inline__ long atomic_add_return(int i, atomic_t * v)
+{
+	long temp, result;
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%1\n"
+	"	addl %0,%3,%2\n"
+	"	addl %0,%3,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
+	:"Ir" (i), "m" (v->counter) : "memory");
+	return result;
+}
+
+#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
+
+static __inline__ long atomic64_add_return(long i, atomic64_t * v)
+{
+	long temp, result;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	addq %0,%3,%2\n"
+	"	addq %0,%3,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
+	:"Ir" (i), "m" (v->counter) : "memory");
+	return result;
+}
+
+static __inline__ long atomic_sub_return(int i, atomic_t * v)
+{
+	long temp, result;
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%1\n"
+	"	subl %0,%3,%2\n"
+	"	subl %0,%3,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
+	:"Ir" (i), "m" (v->counter) : "memory");
+	return result;
+}
+
+static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
+{
+	long temp, result;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	subq %0,%3,%2\n"
+	"	subq %0,%3,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
+	:"Ir" (i), "m" (v->counter) : "memory");
+	return result;
+}
+
+#define atomic_dec_return(v) atomic_sub_return(1,(v))
+#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
+
+#define atomic_inc_return(v) atomic_add_return(1,(v))
+#define atomic64_inc_return(v) atomic64_add_return(1,(v))
+
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
+
+#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
+
+#define atomic_inc(v) atomic_add(1,(v))
+#define atomic64_inc(v) atomic64_add(1,(v))
+
+#define atomic_dec(v) atomic_sub(1,(v))
+#define atomic64_dec(v) atomic64_sub(1,(v))
+
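+/*
+ * The void atomic_inc()/atomic_dec() above imply no memory barrier
+ * of their own, so these hooks must be full smp_mb()s on Alpha.
+ */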
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
+
+#endif /* _ALPHA_ATOMIC_H */
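
A minimal usage sketch of the pattern these primitives are meant for
(resource counting); obj_get/obj_put/obj_free are hypothetical names,
not part of this header:

	#include <asm/atomic.h>

	static atomic_t refcnt = ATOMIC_INIT(1);

	static void obj_get(void)
	{
		atomic_inc(&refcnt);
	}

	static void obj_put(void)
	{
		/* Only the caller dropping the last reference sees true. */
		if (atomic_dec_and_test(&refcnt))
			obj_free();	/* hypothetical destructor */
	}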