/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_AVR32_BITOPS_H
#define __ASM_AVR32_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/byteorder.h>
#include <asm/system.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
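
/*
 * For example, when a bit is used as a lock, the release side needs
 * the barrier so the compiler cannot sink stores out of the critical
 * section.  MY_LOCK_BIT and my_flags are hypothetical names, for
 * illustration only:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(MY_LOCK_BIT, &my_flags);
 */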

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile void *addr)
{
	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
	unsigned long tmp;

	/*
	 * ssrf 5 sets the L (lock) flag; stcond performs the store, and
	 * ends the retry loop, only if the flag is still set.
	 */
	if (__builtin_constant_p(nr)) {
		/* The sbr immediate must be the bit index within the word. */
		asm volatile(
			"1:	ssrf	5\n"
			"	ld.w	%0, %2\n"
			"	sbr	%0, %3\n"
			"	stcond	%1, %0\n"
			"	brne	1b"
			: "=&r"(tmp), "=o"(*p)
			: "m"(*p), "i"(nr % BITS_PER_LONG)
			: "cc");
	} else {
		unsigned long mask = 1UL << (nr % BITS_PER_LONG);

		asm volatile(
			"1:	ssrf	5\n"
			"	ld.w	%0, %2\n"
			"	or	%0, %3\n"
			"	stcond	%1, %0\n"
			"	brne	1b"
			: "=&r"(tmp), "=o"(*p)
			: "m"(*p), "r"(mask)
			: "cc");
	}
}
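
/*
 * Usage sketch (my_flags and nr_from_caller are hypothetical names):
 * a compile-time constant bit number takes the sbr-based path above,
 * while a bit number first known at run time takes the mask-based
 * path.
 *
 *	set_bit(3, &my_flags);
 *	set_bit(nr_from_caller, &my_flags);
 */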

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or
 * smp_mb__after_clear_bit() in order to ensure changes are visible on
 * other processors.
 */
static inline void clear_bit(int nr, volatile void *addr)
{
	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
	unsigned long tmp;

	if (__builtin_constant_p(nr)) {
		/* As in set_bit(), cbr needs the index within the word. */
		asm volatile(
			"1:	ssrf	5\n"
			"	ld.w	%0, %2\n"
			"	cbr	%0, %3\n"
			"	stcond	%1, %0\n"
			"	brne	1b"
			: "=&r"(tmp), "=o"(*p)
			: "m"(*p), "i"(nr % BITS_PER_LONG)
			: "cc");
	} else {
		unsigned long mask = 1UL << (nr % BITS_PER_LONG);

		asm volatile(
			"1:	ssrf	5\n"
			"	ld.w	%0, %2\n"
			"	andn	%0, %3\n"
			"	stcond	%1, %0\n"
			"	brne	1b"
			: "=&r"(tmp), "=o"(*p)
			: "m"(*p), "r"(mask)
			: "cc");
	}
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile void *addr)
{
	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long tmp;

	asm volatile(
		"1:	ssrf	5\n"
		"	ld.w	%0, %2\n"
		"	eor	%0, %3\n"
		"	stcond	%1, %0\n"
		"	brne	1b"
		: "=&r"(tmp), "=o"(*p)
		: "m"(*p), "r"(mask)
		: "cc");
}
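
/*
 * For example, toggling a device polarity flag on every call
 * (POLARITY_BIT and my_flags are illustrative names):
 *
 *	change_bit(POLARITY_BIT, &my_flags);
 */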

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long tmp, old;

	if (__builtin_constant_p(nr)) {
		asm volatile(
			"1:	ssrf	5\n"
			"	ld.w	%0, %3\n"
			"	mov	%2, %0\n"
			"	sbr	%0, %4\n"
			"	stcond	%1, %0\n"
			"	brne	1b"
			: "=&r"(tmp), "=o"(*p), "=&r"(old)
			: "m"(*p), "i"(nr % BITS_PER_LONG)
			: "memory", "cc");
	} else {
		asm volatile(
			"1:	ssrf	5\n"
			"	ld.w	%2, %3\n"
			"	or	%0, %2, %4\n"
			"	stcond	%1, %0\n"
			"	brne	1b"
			: "=&r"(tmp), "=o"(*p), "=&r"(old)
			: "m"(*p), "r"(mask)
			: "memory", "cc");
	}

	return (old & mask) != 0;
}
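
/*
 * Because the old value is returned atomically, this can serve as a
 * simple trylock (MY_BUSY_BIT and my_flags are hypothetical names):
 *
 *	if (test_and_set_bit(MY_BUSY_BIT, &my_flags))
 *		return -EBUSY;
 *
 * A nonzero return means the bit was already set, i.e. some other
 * caller got there first.
 */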

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long tmp, old;

	if (__builtin_constant_p(nr)) {
		asm volatile(
			"1:	ssrf	5\n"
			"	ld.w	%0, %3\n"
			"	mov	%2, %0\n"
			"	cbr	%0, %4\n"
			"	stcond	%1, %0\n"
			"	brne	1b"
			: "=&r"(tmp), "=o"(*p), "=&r"(old)
			: "m"(*p), "i"(nr % BITS_PER_LONG)
			: "memory", "cc");
	} else {
		asm volatile(
			"1:	ssrf	5\n"
			"	ld.w	%0, %3\n"
			"	mov	%2, %0\n"
			"	andn	%0, %4\n"
			"	stcond	%1, %0\n"
			"	brne	1b"
			: "=&r"(tmp), "=o"(*p), "=&r"(old)
			: "m"(*p), "r"(mask)
			: "memory", "cc");
	}

	return (old & mask) != 0;
}
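
/*
 * A typical use is consuming a pending-work flag exactly once even if
 * several CPUs race on it (PENDING_BIT, my_flags and
 * handle_pending_work() are hypothetical):
 *
 *	if (test_and_clear_bit(PENDING_BIT, &my_flags))
 *		handle_pending_work();
 *
 * Only the CPU that actually clears the bit sees a nonzero return.
 */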

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long tmp, old;

	asm volatile(
		"1:	ssrf	5\n"
		"	ld.w	%2, %3\n"
		"	eor	%0, %2, %4\n"
		"	stcond	%1, %0\n"
		"	brne	1b"
		: "=&r"(tmp), "=o"(*p), "=&r"(old)
		: "m"(*p), "r"(mask)
		: "memory", "cc");

	return (old & mask) != 0;
}
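
/*
 * Illustrative use (LED_BIT and my_flags are hypothetical): toggle a
 * state bit and learn which state it was in beforehand:
 *
 *	was_on = test_and_change_bit(LED_BIT, &my_flags);
 */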

#include <asm-generic/bitops/non-atomic.h>

/* Find First bit Set */
static inline unsigned long __ffs(unsigned long word)
{
	unsigned long result;

	asm("brev %1\n\t"
	    "clz %0,%1"
	    : "=r"(result), "=&r"(word)
	    : "1"(word));
	return result;
}
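
/*
 * brev reverses the bit order of the word, turning the least
 * significant set bit into the most significant one, and clz then
 * counts the zeros above it, which equals the original bit index.
 * For example, __ffs(0x60) == 5.  As usual for __ffs(), the caller
 * must not pass 0.
 */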

/* Find First Zero */
static inline unsigned long ffz(unsigned long word)
{
	return __ffs(~word);
}
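
/*
 * For example, ffz(0x0000ffff) == 16.  As with __ffs(), the caller
 * must not pass a word with no zero bit (~0UL).
 */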

/* Find Last bit Set */
static inline int fls(unsigned long word)
{
	unsigned long result;

	asm("clz %0,%1" : "=r"(result) : "r"(word));
	return 32 - result;
}
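
/*
 * For example, fls(0) == 0, fls(1) == 1 and fls(0x80000000) == 32.
 * clz yields 32 for a zero operand, so the zero case needs no special
 * handling here.
 */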

unsigned long find_first_zero_bit(const unsigned long *addr,
				  unsigned long size);
unsigned long find_next_zero_bit(const unsigned long *addr,
				 unsigned long size,
				 unsigned long offset);
unsigned long find_first_bit(const unsigned long *addr,
			     unsigned long size);
unsigned long find_next_bit(const unsigned long *addr,
			    unsigned long size,
			    unsigned long offset);
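
/*
 * These bitmap searches are only declared here; the definitions live
 * outside this header.  A typical caller walks every set bit in a
 * bitmap (bitmap, NR_MY_BITS and handle() are hypothetical):
 *
 *	for (bit = find_first_bit(bitmap, NR_MY_BITS);
 *	     bit < NR_MY_BITS;
 *	     bit = find_next_bit(bitmap, NR_MY_BITS, bit + 1))
 *		handle(bit);
 */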

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, and therefore
 * differs in spirit from the ffz() above (man ffs).
 *
 * The difference is that bit numbering starts at 1, and if no bit is
 * set, the function returns 0.
 */
static inline int ffs(unsigned long word)
{
	if (word == 0)
		return 0;
	return __ffs(word) + 1;
}
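
/*
 * For example, ffs(0) == 0 and ffs(0x10) == 5, whereas the
 * zero-indexed __ffs(0x10) == 4.
 */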

#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix-le.h>

#endif /* __ASM_AVR32_BITOPS_H */