		Semantics and Behavior of Atomic and
		         Bitmask Operations

			  David S. Miller

	This document is intended to serve as a guide to Linux port
maintainers on how to implement atomic counter, bitops, and spinlock
interfaces properly.

	The atomic_t type should be defined as a signed integer.
Also, it should be made opaque such that any kind of cast to a normal
C integer type will fail.  Something like the following should
suffice:

	typedef struct { volatile int counter; } atomic_t;

	The first operations to implement for atomic_t's are the
initializers and plain reads.

	#define ATOMIC_INIT(i)		{ (i) }
	#define atomic_set(v, i)	((v)->counter = (i))

The first macro is used in definitions, such as:

static atomic_t my_counter = ATOMIC_INIT(1);

The second interface can be used at runtime, as in:

	struct foo { atomic_t counter; };
	...

	struct foo *k;

	k = kmalloc(sizeof(*k), GFP_KERNEL);
	if (!k)
		return -ENOMEM;
	atomic_set(&k->counter, 0);

Next, we have:

	#define atomic_read(v)	((v)->counter)

which simply reads the current value of the counter.

Now, we move on to the actual atomic operation interfaces.

	void atomic_add(int i, atomic_t *v);
	void atomic_sub(int i, atomic_t *v);
	void atomic_inc(atomic_t *v);
	void atomic_dec(atomic_t *v);

These four routines add and subtract integral values to/from the given
atomic_t value.  The first two routines pass explicit integers by
which to make the adjustment, whereas the latter two use an implicit
adjustment value of "1".

One very important aspect of these routines is that they DO NOT
require any explicit memory barriers.  They need only perform the
atomic_t counter update in an SMP safe manner.

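As an illustration only (not taken from any particular port), a
platform lacking suitable atomic instructions could implement these
routines by protecting the plain counter update with a spinlock, much
like the sparc32 scheme described later in this document.  The lock
name below is hypothetical:

	static spinlock_t __atomic_lock = SPIN_LOCK_UNLOCKED;	/* hypothetical */

	void atomic_add(int i, atomic_t *v)
	{
		unsigned long flags;

		/* No smp_mb() calls: these routines promise atomicity
		 * only, not any ordering of surrounding memory operations.
		 */
		spin_lock_irqsave(&__atomic_lock, flags);
		v->counter += i;
		spin_unlock_irqrestore(&__atomic_lock, flags);
	}
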
Next, we have:

	int atomic_inc_return(atomic_t *v);
	int atomic_dec_return(atomic_t *v);

These routines add 1 and subtract 1, respectively, from the given
atomic_t and return the new counter value after the operation is
performed.

Unlike the above routines, these require explicit memory barriers to
be performed before and after the operation.  This must be done such
that all memory operations before and after the atomic operation call
are strongly ordered with respect to the atomic operation itself.

For example, it should behave as if an smp_mb() call existed both
before and after the atomic operation.

If the atomic instructions used in an implementation provide explicit
memory barrier semantics which satisfy the above requirements, that is
fine as well.

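A minimal sketch of what this means for an implementation follows.  It
assumes a hypothetical arch_atomic_add_return() primitive that is
atomic but has no ordering guarantees of its own; if the underlying
instruction is already a full barrier, the smp_mb() calls would be
unnecessary:

	int atomic_inc_return(atomic_t *v)
	{
		int ret;

		smp_mb();	/* order all prior memory operations */
		ret = arch_atomic_add_return(1, v);	/* hypothetical unordered primitive */
		smp_mb();	/* order all subsequent memory operations */

		return ret;
	}
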
Let's move on:

	int atomic_add_return(int i, atomic_t *v);
	int atomic_sub_return(int i, atomic_t *v);

These behave just like atomic_{inc,dec}_return() except that an
explicit counter adjustment is given instead of the implicit "1".
This means that like atomic_{inc,dec}_return(), the memory barrier
semantics are required.

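As a usage sketch (the counter and function names are made up for
illustration), the returned value makes these handy for handing out
unique, monotonically increasing values:

	static atomic_t next_id = ATOMIC_INIT(0);

	int get_new_id(void)
	{
		/* Returns the value after the increment, so concurrent
		 * callers each see a distinct id.
		 */
		return atomic_add_return(1, &next_id);
	}
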
Next:

	int atomic_inc_and_test(atomic_t *v);
	int atomic_dec_and_test(atomic_t *v);

These two routines increment and decrement by 1, respectively, the
given atomic counter.  They return a boolean indicating whether the
resulting counter value was zero or not.

They require explicit memory barrier semantics around the operation,
as above.

	int atomic_sub_and_test(int i, atomic_t *v);

This is identical to atomic_dec_and_test() except that an explicit
decrement is given instead of the implicit "1".  It requires explicit
memory barrier semantics around the operation.

	int atomic_add_negative(int i, atomic_t *v);

The given increment is added to the given atomic counter value.  A
boolean is returned which indicates whether the resulting counter value
is negative.  It requires explicit memory barrier semantics around the
operation.

Then:

	int atomic_cmpxchg(atomic_t *v, int old, int new);

This performs an atomic compare-and-exchange operation on the atomic
value v, with the given old and new values.  Like all atomic_xxx
operations, atomic_cmpxchg will only satisfy its atomicity semantics
as long as all other accesses of *v are performed through atomic_xxx
operations.

atomic_cmpxchg requires explicit memory barriers around the operation.

The semantics for atomic_cmpxchg are the same as those defined for 'cas'
below.

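A usage sketch, assuming a made-up requirement that a counter must
never be incremented past INT_MAX (the function name is hypothetical);
this is the canonical compare-and-exchange retry loop:

	int atomic_inc_unless_saturated(atomic_t *v)
	{
		int old, new, ret;

		do {
			old = atomic_read(v);
			if (old == INT_MAX)
				return 0;	/* saturated, leave it alone */
			new = old + 1;
			ret = atomic_cmpxchg(v, old, new);
			/* ret != old means another cpu changed the counter
			 * between our read and the cmpxchg; retry.
			 */
		} while (ret != old);

		return 1;
	}
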
Finally:

	int atomic_add_unless(atomic_t *v, int a, int u);

If the atomic value v is not equal to u, this function adds a to v, and
returns non-zero.  If v is equal to u then it returns zero.  This is done
as an atomic operation.

atomic_add_unless requires explicit memory barriers around the operation.

atomic_inc_not_zero() is also provided, and is equivalent to
atomic_add_unless(v, 1, 0).

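One plausible way to implement this in terms of atomic_cmpxchg(), shown
as a sketch rather than a definitive recipe; a port is free to use a
more direct instruction sequence instead:

	static inline int atomic_add_unless(atomic_t *v, int a, int u)
	{
		int c, old;

		c = atomic_read(v);
		while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
			c = old;	/* lost a race; retry with the fresh value */

		return c != u;
	}
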

If a caller requires memory barrier semantics around an atomic_t
operation which does not return a value, a set of interfaces are
defined which accomplish this:

	void smp_mb__before_atomic_dec(void);
	void smp_mb__after_atomic_dec(void);
	void smp_mb__before_atomic_inc(void);
	void smp_mb__after_atomic_inc(void);

For example, smp_mb__before_atomic_dec() can be used like so:

	obj->dead = 1;
	smp_mb__before_atomic_dec();
	atomic_dec(&obj->ref_count);

It makes sure that all memory operations preceding the atomic_dec()
call are strongly ordered with respect to the atomic counter
operation.  In the above example, it guarantees that the assignment of
"1" to obj->dead will be globally visible to other cpus before the
atomic counter decrement.

Without the explicit smp_mb__before_atomic_dec() call, the
implementation could legally allow the atomic counter update to become
visible to other cpus before the "obj->dead = 1;" assignment.

The other three interfaces listed are used to provide explicit
ordering with respect to memory operations after an atomic_dec() call
(smp_mb__after_atomic_dec()) and around atomic_inc() calls
(smp_mb__{before,after}_atomic_inc()).

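For symmetry, a sketch of the "after" variant, reusing the example
object above:

	atomic_dec(&obj->ref_count);
	smp_mb__after_atomic_dec();
	obj->dead = 1;

This guarantees that the atomic counter decrement is globally visible
to other cpus before the assignment of "1" to obj->dead.
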
A missing memory barrier in the cases where they are required by the
atomic_t implementation above can have disastrous results.  Here is
an example, which follows a pattern occurring frequently in the Linux
kernel.  It is the use of atomic counters to implement reference
counting, and it works such that once the counter falls to zero it can
be guaranteed that no other entity can be accessing the object:

static void obj_list_add(struct obj *obj)
{
	obj->active = 1;
	list_add(&obj->list, &global_list);
}

static void obj_list_del(struct obj *obj)
{
	list_del(&obj->list);
	obj->active = 0;
}

static void obj_destroy(struct obj *obj)
{
	BUG_ON(obj->active);
	kfree(obj);
}

struct obj *obj_list_peek(struct list_head *head)
{
	if (!list_empty(head)) {
		struct obj *obj;

		obj = list_entry(head->next, struct obj, list);
		atomic_inc(&obj->refcnt);
		return obj;
	}
	return NULL;
}

void obj_poke(void)
{
	struct obj *obj;

	spin_lock(&global_list_lock);
	obj = obj_list_peek(&global_list);
	spin_unlock(&global_list_lock);

	if (obj) {
		obj->ops->poke(obj);
		if (atomic_dec_and_test(&obj->refcnt))
			obj_destroy(obj);
	}
}

void obj_timeout(struct obj *obj)
{
	spin_lock(&global_list_lock);
	obj_list_del(obj);
	spin_unlock(&global_list_lock);

	if (atomic_dec_and_test(&obj->refcnt))
		obj_destroy(obj);
}

(This is a simplification of the ARP queue management in the
 generic neighbour discovery code of the networking.  Olaf Kirch
 found a bug wrt. memory barriers in kfree_skb() that exposed
 the atomic_t memory barrier requirements quite clearly.)

Given the above scheme, it must be the case that the obj->active
update done by the obj list deletion is visible to other processors
before the atomic counter decrement is performed.

Otherwise, the counter could fall to zero, yet obj->active would still
be set, thus triggering the assertion in obj_destroy().  The error
sequence looks like this:

	cpu 0				cpu 1
	obj_poke()			obj_timeout()
	obj = obj_list_peek();
	... gains ref to obj, refcnt=2
					obj_list_del(obj);
					obj->active = 0 ...
					... visibility delayed ...
					atomic_dec_and_test()
					... refcnt drops to 1 ...
	atomic_dec_and_test()
	... refcnt drops to 0 ...
	obj_destroy()
	BUG() triggers since obj->active
	still seen as one
					obj->active update visibility occurs

With the memory barrier semantics required of the atomic_t operations
which return values, the above sequence of memory visibility can never
happen.  Specifically, in the above case the atomic_dec_and_test()
counter decrement would not become globally visible until the
obj->active update does.

As a historical note, 32-bit Sparc used to only allow usage of
24-bits of its atomic_t type.  This was because it used 8 bits
as a spinlock for SMP safety.  Sparc32 lacked a "compare and swap"
type instruction.  However, 32-bit Sparc has since been moved over
to a "hash table of spinlocks" scheme, that allows the full 32-bit
counter to be realized.  Essentially, an array of spinlocks is
indexed into based upon the address of the atomic_t being operated
on, and that lock protects the atomic operation.  Parisc uses the
same scheme.

Another note is that the atomic_t operations returning values are
extremely slow on an old 386.

We will now cover the atomic bitmask operations.  You will find that
their SMP and memory barrier semantics are similar in shape and scope
to the atomic_t ops above.

Native atomic bit operations are defined to operate on objects aligned
to the size of an "unsigned long" C data type, and are at least of that
size.  The endianness of the bits within each "unsigned long" is the
native endianness of the cpu.

	void set_bit(unsigned long nr, volatile unsigned long *addr);
	void clear_bit(unsigned long nr, volatile unsigned long *addr);
	void change_bit(unsigned long nr, volatile unsigned long *addr);

These routines set, clear, and change, respectively, the bit number
indicated by "nr" in the bit mask pointed to by "addr".

They must execute atomically, yet there are no implicit memory barrier
semantics required of these interfaces.

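A small usage sketch; the flag bit number, structure, and helper are
invented for illustration:

	#define WORK_PENDING	0	/* hypothetical bit number */

	struct work_item {
		unsigned long flags;
		/* ... */
	};

	void mark_pending(struct work_item *w)
	{
		/* Atomic with respect to other bitops on w->flags, but
		 * implies no memory barrier whatsoever.
		 */
		set_bit(WORK_PENDING, &w->flags);
	}
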
	int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
	int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
	int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);

Like the above, except that these routines return a boolean which
indicates whether the changed bit was set _BEFORE_ the atomic bit
operation.

WARNING! It is incredibly important that the value be a boolean,
i.e. "0" or "1".  Do not try to be fancy and save a few instructions by
declaring the above to return "long" and just returning something like
"old_val & mask" because that will not work.

For one thing, this return value gets truncated to int in many code
paths using these interfaces, so on 64-bit platforms, if the bit is set
in the upper 32 bits, then callers will never see it.

One great example of where this problem crops up is the thread_info
flag operations.  Routines such as test_and_set_ti_thread_flag() chop
the return value into an int.  There are other places where things
like this occur as well.

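As a usage sketch of the boolean return value (the bit name and helper
are hypothetical), a typical "do one-time setup exactly once" pattern
looks like:

	if (!test_and_set_bit(WORK_INITIALIZED, &w->flags)) {
		/* Only the cpu that actually flipped the bit from 0
		 * to 1 gets here, even if several race to do so.
		 */
		work_item_one_time_setup(w);
	}
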
These routines, like the atomic_t counter operations returning values,
require explicit memory barrier semantics around their execution.  All
memory operations before the atomic bit operation call must be made
visible globally before the atomic bit operation is made visible.
Likewise, the atomic bit operation must be visible globally before any
subsequent memory operation is made visible.  For example:

	obj->dead = 1;
	if (test_and_set_bit(0, &obj->flags))
		/* ... */;
	obj->killed = 1;

The implementation of test_and_set_bit() must guarantee that
"obj->dead = 1;" is visible to cpus before the atomic memory operation
done by test_and_set_bit() becomes visible.  Likewise, the atomic
memory operation done by test_and_set_bit() must become visible before
"obj->killed = 1;" is visible.

Finally there is the basic operation:

	int test_bit(unsigned long nr, __const__ volatile unsigned long *addr);

which returns a boolean indicating if bit "nr" is set in the bitmask
pointed to by "addr".

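A one-line usage sketch, continuing the hypothetical flag and helper
names from above:

	if (test_bit(WORK_PENDING, &w->flags))
		process_pending_work(w);	/* hypothetical helper */
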
If explicit memory barriers are required around clear_bit() (which
does not return a value, and thus does not need to provide memory
barrier semantics), two interfaces are provided:

	void smp_mb__before_clear_bit(void);
	void smp_mb__after_clear_bit(void);

They are used as follows, and are akin to their atomic_t operation
brothers:

	/* All memory operations before this call will
	 * be globally visible before the clear_bit().
	 */
	smp_mb__before_clear_bit();
	clear_bit( ... );

	/* The clear_bit() will be visible before all
	 * subsequent memory operations.
	 */
	smp_mb__after_clear_bit();

Finally, there are non-atomic versions of the bitmask operations
provided.  They are used in contexts where some other higher-level SMP
locking scheme is being used to protect the bitmask, and thus less
expensive non-atomic operations may be used in the implementation.
They have names similar to the above bitmask operation interfaces,
except that two underscores are prefixed to the interface name.

	void __set_bit(unsigned long nr, volatile unsigned long *addr);
	void __clear_bit(unsigned long nr, volatile unsigned long *addr);
	void __change_bit(unsigned long nr, volatile unsigned long *addr);
	int __test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
	int __test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
	int __test_and_change_bit(unsigned long nr, volatile unsigned long *addr);

These non-atomic variants also do not require any special memory
barrier semantics.

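A usage sketch: here the bitmap is assumed to be protected by a
spinlock of the caller's own (the lock and bit names are hypothetical),
so the cheaper non-atomic variant is sufficient:

	spin_lock(&w->lock);
	/* w->flags is only ever touched under w->lock, so the
	 * non-atomic variant is safe here.
	 */
	__set_bit(WORK_PENDING, &w->flags);
	spin_unlock(&w->lock);
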
The routines xchg() and cmpxchg() need the same exact memory barriers
as the atomic and bit operations returning values.

Spinlocks and rwlocks have memory barrier expectations as well.
The rule to follow is simple, and a short sketch illustrating it
appears after the list:

1) When acquiring a lock, the implementation must make it globally
   visible before any subsequent memory operation.

2) When releasing a lock, the implementation must make it such that
   all previous memory operations are globally visible before the
   lock release.

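Here is that sketch of what the two rules buy the user of the lock
(the field and value are hypothetical):

	spin_lock(&obj->lock);		/* rule 1: the lock acquisition is
					 * visible before the store below */
	obj->state = OBJ_STATE_DEAD;	/* hypothetical field and value */
	spin_unlock(&obj->lock);	/* rule 2: the store above is visible
					 * before the lock release */

Thus another cpu which subsequently acquires obj->lock is guaranteed
to observe the new obj->state value.
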
Which finally brings us to _atomic_dec_and_lock().  There is an
architecture-neutral version implemented in lib/dec_and_lock.c,
but most platforms will wish to optimize this in assembler.

	int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);

Atomically decrement the given counter, and if it will drop to zero,
atomically acquire the given spinlock and perform the decrement
of the counter to zero.  If it does not drop to zero, do nothing
with the spinlock.

It is actually pretty simple to get the memory barrier correct.
Simply satisfy the spinlock grab requirements, which is to make
sure the spinlock operation is globally visible before any
subsequent memory operation.

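A typical usage sketch, reusing the helpers from the reference
counting example earlier in this document:

	if (_atomic_dec_and_lock(&obj->refcnt, &global_list_lock)) {
		/* This was the last reference, and global_list_lock is
		 * now held, so nobody else can find the object anymore.
		 */
		obj_list_del(obj);
		spin_unlock(&global_list_lock);
		obj_destroy(obj);
	}
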
We can demonstrate this operation more clearly if we define
an abstract atomic operation:

	long cas(long *mem, long old, long new);

"cas" stands for "compare and swap".  It atomically:

1) Compares "old" with the value currently at "mem".
2) If they are equal, "new" is written to "mem".
3) Regardless, the value that was at "mem" before any write is
   returned.

As an example usage, here is what an atomic counter update
might look like:

void example_atomic_inc(long *counter)
{
	long old, new, ret;

	while (1) {
		old = *counter;
		new = old + 1;

		ret = cas(counter, old, new);
		if (ret == old)
			break;
	}
}

Let's use cas() in order to build a pseudo-C atomic_dec_and_lock():

int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	long old, new, ret;
	int went_to_zero;

	went_to_zero = 0;
	while (1) {
		old = atomic_read(atomic);
		new = old - 1;
		if (new == 0) {
			went_to_zero = 1;
			spin_lock(lock);
		}
		ret = cas(atomic, old, new);
		if (ret == old)
			break;
		if (went_to_zero) {
			spin_unlock(lock);
			went_to_zero = 0;
		}
	}

	return went_to_zero;
}

Now, as far as memory barriers go, as long as spin_lock()
strictly orders all subsequent memory operations (including
the cas()) with respect to itself, things will be fine.

Said another way, _atomic_dec_and_lock() must guarantee that
a counter dropping to zero is never made visible before the
spinlock is acquired.

Note that this also means that for the case where the counter
is not dropping to zero, there are no memory ordering
requirements.