#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>

#define BIT_64(n)			(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit BITS_PER_LONG is the LSB of (addr+1).
 */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif

#define ADDR				BITOP_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))
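
/*
 * Worked example (illustrative only): for nr == 11, CONST_MASK_ADDR()
 * addresses byte (11 >> 3) == 1 of the bitmap, and CONST_MASK(11) is
 * (1 << (11 & 7)) == 0x08, so the locked byte op touches only that byte.
 */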

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(unsigned int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX "bts %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}
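
/*
 * Usage sketch (illustrative only, not part of this header):
 *
 *	static DECLARE_BITMAP(pending, 64);
 *
 *	set_bit(5, pending);		atomically sets bit 5
 *	clear_bit(5, pending);		atomically clears it again
 */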

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btr %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/**
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}

/**
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
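
/*
 * Barrier sketch (illustrative only): when a cleared bit releases work
 * that another CPU polls for, order the preceding stores first, e.g.:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(0, &pending);		pending is a placeholder bitmap
 */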

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btc %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
		     "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static __always_inline int
test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}
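
/*
 * Lock-style sketch (illustrative only; the kernel's real bit spinlocks
 * live in <linux/bit_spinlock.h>).  LOCK_BIT and word are placeholders:
 *
 *	while (test_and_set_bit_lock(LOCK_BIT, &word))
 *		cpu_relax();		spin until the old bit was 0
 *	...critical section...
 *	clear_bit_unlock(LOCK_BIT, &word);
 */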

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm("bts %2,%1\n\t"
	    "sbb %0,%0"
	    : "=r" (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

	return oldbit;
}

static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr % BITS_PER_LONG)) &
		(addr[nr / BITS_PER_LONG])) != 0;
}

static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
{
	int oldbit;

	asm volatile("bt %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))
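
/*
 * Dispatch example (illustrative only): a compile-time-constant bit
 * number folds to a mask test, a variable one uses the bt instruction:
 *
 *	test_bit(3, addr);	constant nr -> constant_test_bit()
 *	test_bit(n, addr);	variable nr -> variable_test_bit()
 */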

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}
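
/*
 * Example (illustrative only): __ffs(0x58) == 3, since 0x58 is binary
 * 1011000 and its lowest set bit is bit 3.  __ffs(0) is undefined.
 */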

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}
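
/*
 * Example (illustrative only): ffz(0x0f) == 4, since bits 0-3 are set
 * and bit 4 is the lowest zero bit.  ffz(~0UL) is undefined.
 */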

/**
 * __fls - find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
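
/*
 * Example (illustrative only): __fls(0x58) == 6, since the highest set
 * bit of binary 1011000 is bit 6.  __fls(0) is undefined.
 */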

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says
	 * the dest reg is undefined if x==0, but their CPU architect confirms
	 * it in fact writes back the previous value, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
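
/*
 * Examples (illustrative only): ffs(0) == 0, ffs(0x10) == 5 (bit 4,
 * counted from position 1).
 */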

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way to the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static inline int fls(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says
	 * the dest reg is undefined if x==0, but their CPU architect confirms
	 * it in fact writes back the previous value, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
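
/*
 * Examples (illustrative only): fls(0) == 0, fls(0x10) == 5,
 * fls(~0) == 32.
 */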

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way to the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says
	 * the dest reg is undefined if x==0, but their CPU architect confirms
	 * it in fact writes back the previous value.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif
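
/*
 * Examples (illustrative only): fls64(0) == 0, fls64(1ULL << 40) == 41.
 */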

#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */