#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
 * O(1) scheduler patch
 */

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are drivers (e.g., eepro100) which use these operations to
 * operate on hw-defined data-structures, so we can't easily change these
 * operations to force a bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
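	/*
	 * Classic compare-and-exchange retry loop: reread the word and
	 * retry whenever another CPU updated it between the load and
	 * the cmpxchg_acq().
	 */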
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

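/*
 * Illustrative usage sketch; the "flags" word here is a hypothetical
 * bitmask, not something defined by this header:
 *
 *	static unsigned long flags;
 *
 *	set_bit(0, &flags);		set bit 0 atomically
 *	if (test_bit(0, &flags))	check whether it is set
 *		clear_bit(0, &flags);	clear it atomically again
 */
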
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}

/*
 * clear_bit() has "acquire" semantics.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	do { /* skip */; } while (0)

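/*
 * Rationale: cmpxchg_acq() gives clear_bit() acquire semantics, so no
 * later access can be reordered before the bit is cleared; hence the
 * "after" barrier may be a no-op, while prior stores still need a full
 * fence.  Illustrative unlock-style sketch (LOCK_BIT and "word" are
 * hypothetical names):
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &word);
 */
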
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 */
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
	volatile __u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	*p &= ~m;
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p |= m;
	return oldbitset;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & ~mask) != 0;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_clear_bit(int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p &= ~m;
	return oldbitset;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/*
 * WARNING: non-atomic version of test_and_change_bit().
 */
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
	__u32 old, bit = (1 << (nr & 31));
	__u32 *m = (__u32 *) addr + (nr >> 5);

	old = *m;
	*m = old ^ bit;
	return (old & bit) != 0;
}

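/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */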
static __inline__ int
test_bit (int nr, const volatile void *addr)
{
	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}

/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit-number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
	unsigned long result;

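	/*
	 * x & (~x - 1) keeps exactly the run of 1-bits below the lowest
	 * 0-bit, so its population count is that bit's index.  E.g., for
	 * x = 1011 (binary), x & (~x - 1) = 0011, popcnt = 2, and bit 2
	 * is indeed the first zero bit.
	 */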
	result = ia64_popcnt(x & (~x - 1));
	return result;
}

/**
 * __ffs - find first bit in word.
 * @x: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
	unsigned long result;

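	/*
	 * (x - 1) & ~x sets exactly the bits below the lowest 1-bit, so
	 * its population count is that bit's index.  E.g., for x = 8,
	 * (x - 1) & ~x = 7 and popcnt(7) = 3, so __ffs(8) = 3.
	 */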
	result = ia64_popcnt((x-1) & ~x);
	return result;
}

#ifdef __KERNEL__

/*
 * Return bit number of last (most-significant) bit set.  Undefined
 * for x==0.  Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
	long double d = x;
	long exp;

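	/*
	 * Converting x to (80-bit extended) floating point normalizes it;
	 * getf.exp extracts the biased exponent, and subtracting the
	 * 0xffff bias leaves floor(log2(x)), the index of the
	 * most-significant set bit.
	 */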
	exp = ia64_getf_exp(d);
	return exp - 0xffff;
}

/*
 * Find the last (most significant) bit set.  Returns 0 for x==0 and
 * bits are numbered from 1..32 (e.g., fls(9) == 4).
 */
static inline int
fls (int t)
{
	unsigned long x = t & 0xffffffffu;

	if (!x)
		return 0;
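	/*
	 * Smear the highest set bit into every position below it, then
	 * count the 1-bits: e.g., x = 9 (1001) smears to 1111, and
	 * popcnt(1111) = 4 = fls(9).
	 */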
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return ia64_popcnt(x);
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs: find first bit set. This is defined the same way as the libc and
 * compiler builtin ffs routines, therefore differs in spirit from the above
 * ffz (man ffs): it operates on "int" values only and the result value is the
 * bit number + 1.  ffs(0) is defined to return zero.
 */
#define ffs(x)	__builtin_ffs(x)

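/*
 * Quick comparison of the bit-search flavours defined above:
 * ffs(8) == 4, __ffs(8) == 3, ffz(7) == 3, and fls(8) == 4.
 */
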
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
static __inline__ unsigned long
hweight64 (unsigned long x)
{
	unsigned long result;
	result = ia64_popcnt(x);
	return result;
}

#define hweight32(x)	(unsigned int) hweight64((x) & 0xfffffffful)
#define hweight16(x)	(unsigned int) hweight64((x) & 0xfffful)
#define hweight8(x)	(unsigned int) hweight64((x) & 0xfful)

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(l,n,a)	test_and_set_bit(n,a)
#define ext2_clear_bit_atomic(l,n,a)	test_and_clear_bit(n,a)

#include <asm-generic/bitops/minix.h>
#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */