#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *	if (atomic_dec_and_test(&atomic)) {
 *		spin_lock(&lock);
 *		return 1;
 *	}
 *	return 0;
 *
 * because the spin-lock and the decrement must be
 * "atomic".
 */
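/*
 * Why it must be atomic (a concrete interleaving, added here for
 * illustration): assume objects are found via a lookup structure
 * protected by "lock", and a lookup takes a new reference while
 * holding the lock.  With the non-atomic version above, CPU 0 can
 * see the count hit zero and stall before taking the lock, while
 * CPU 1, already inside the lock, finds the object and bumps the
 * count back up.  CPU 0 then tears the object down even though
 * CPU 1 holds a live reference.  Taking the lock *before* the final
 * decrement, as the slow path below does, closes that window.
 */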
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}

EXPORT_SYMBOL(_atomic_dec_and_lock);
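/*
 * A minimal usage sketch, kept as a comment since it is not part of
 * this file; "struct foo", "foo_lock" and foo_put() are made-up names
 * for illustration.  The typical caller drops the last reference and
 * unlinks the object from a lock-protected list in the same critical
 * section, via the atomic_dec_and_lock() wrapper:
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_lock(&f->count, &foo_lock)) {
 *			list_del(&f->list);
 *			spin_unlock(&foo_lock);
 *			kfree(f);
 *		}
 *	}
 *
 * The lock is held on return only when the count reached zero, so
 * only the final put pays for the lock; every other put takes the
 * atomic_add_unless() fast path and never touches it.
 */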