#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

#ifdef __HAVE_ARCH_CMPXCHG
/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * This implementation can be used on any architecture that
 * has a cmpxchg, and where atomic->counter is an int holding
 * the value of the atomic (i.e. the high bits aren't used
 * for a lock or anything like that).
 */
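/*
 * For reference, a sketch of the layout this relies on.  The exact
 * definition lives in each architecture's asm/atomic.h; this is only
 * the typical shape, not quoted from any one architecture:
 *
 *	typedef struct { volatile int counter; } atomic_t;
 */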
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	int counter;
	int newcount;

	for (;;) {
		counter = atomic_read(atomic);
		newcount = counter - 1;
		if (!newcount)
			break;		/* do it the slow way */

		/*
		 * cmpxchg returns the value the counter had before the
		 * exchange: if it still equals "counter", our decrement
		 * was committed atomically and the count is still
		 * nonzero, so no lock is needed.
		 */
		newcount = cmpxchg(&atomic->counter, counter, newcount);
		if (newcount == counter)
			return 0;
		/* Somebody else changed the count under us - retry. */
	}

	/*
	 * The decrement would have hit zero: take the lock first,
	 * then do the final decrement under it.
	 */
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}
#else
/*
 * This is an architecture-neutral, but slow,
 * implementation of the notion of "decrement
 * a reference count, and return locked if it
 * decremented to zero".
 *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *	if (atomic_dec_and_test(&atomic)) {
 *		spin_lock(&lock);
 *		return 1;
 *	}
 *	return 0;
 *
 * because the spin-lock and the decrement must be
 * "atomic".  In the broken version above there is a
 * window between the decrement hitting zero and the
 * spin_lock() in which another CPU, already holding
 * the lock, can find the object and see a zero count.
 *
 * This slow version gets the spinlock unconditionally,
 * and releases it if it isn't needed.  Architectures
 * are encouraged to come up with better approaches;
 * this is trivially done efficiently using a load-locked
 * store-conditional approach, for example.
 */
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}
#endif

EXPORT_SYMBOL(_atomic_dec_and_lock);
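
/*
 * A usage sketch, for illustration only: "struct foo", foo_put() and
 * foo_list_lock are made-up names, not part of this file.  The classic
 * caller pattern is dropping the last reference to an object that
 * lives on a spinlock-protected list, via the atomic_dec_and_lock()
 * wrapper from <linux/spinlock.h>:
 *
 *	static void foo_put(struct foo *foo)
 *	{
 *		if (atomic_dec_and_lock(&foo->refcount, &foo_list_lock)) {
 *			list_del(&foo->list);
 *			spin_unlock(&foo_list_lock);
 *			kfree(foo);
 *		}
 *	}
 *
 * If the count stays nonzero this returns 0 and the lock is never
 * taken; if it hits zero we come back holding the lock, so no other
 * CPU can find the object on the list once its count has reached zero.
 */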