| Jeremy Fitzhardinge | d5de884 | 2008-07-23 13:28:58 -0700 | [diff] [blame] | 1 | /* | 
|  | 2 | * Split spinlock implementation out into its own file, so it can be | 
|  | 3 | * compiled in a FTRACE-compatible way. | 
|  | 4 | */ | 
|  | 5 | #include <linux/kernel_stat.h> | 
|  | 6 | #include <linux/spinlock.h> | 
| Jeremy Fitzhardinge | 994025c | 2008-08-20 17:02:19 -0700 | [diff] [blame] | 7 | #include <linux/debugfs.h> | 
|  | 8 | #include <linux/log2.h> | 
| Jeremy Fitzhardinge | d5de884 | 2008-07-23 13:28:58 -0700 | [diff] [blame] | 9 |  | 
|  | 10 | #include <asm/paravirt.h> | 
|  | 11 |  | 
|  | 12 | #include <xen/interface/xen.h> | 
|  | 13 | #include <xen/events.h> | 
|  | 14 |  | 
|  | 15 | #include "xen-ops.h" | 
| Jeremy Fitzhardinge | 994025c | 2008-08-20 17:02:19 -0700 | [diff] [blame] | 16 | #include "debugfs.h" | 
|  | 17 |  | 
#ifdef CONFIG_XEN_DEBUG_FS
/*
 * Lock statistics, exported via debugfs (see xen_spinlock_debugfs()
 * below).  Counters are updated without any locking, so the numbers
 * are only approximate.
 */
static struct xen_spinlock_stats
{
	u64 taken;			/* total lock acquisitions */
	u32 taken_slow;			/* acquisitions via the blocking slow path */
	u32 taken_slow_nested;		/* slow path entered while this cpu was
					   already spinning on another lock */
	u32 taken_slow_pickup;		/* lock became free on slow-path recheck */
	u32 taken_slow_spurious;	/* poll returned without the irq pending */
	u32 taken_slow_irqenable;	/* slow path entered with irqs re-enabled */

	u64 released;			/* total lock releases */
	u32 released_slow;		/* releases that took the kick path */
	u32 released_slow_kicked;	/* releases that IPI'd a waiting cpu */

#define HISTO_BUCKETS	30
	/* log2 time histograms; the final entry is the overflow bucket */
	u32 histo_spin_total[HISTO_BUCKETS+1];
	u32 histo_spin_spinning[HISTO_BUCKETS+1];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];

	u64 time_total;			/* total time spent acquiring locks */
	u64 time_spinning;		/* time spent in the fast spin loop */
	u64 time_blocked;		/* time spent blocked in xen_poll_irq() */
} spinlock_stats;

/* Written via debugfs to request a lazy reset of all stats above. */
static u8 zero_stats;

/* Fast-path spin count before entering the slow path; debugfs-tunable. */
static unsigned lock_timeout = 1 << 10;
#define TIMEOUT lock_timeout

/* Reset the statistics if a zeroing request is pending.  Racy by design. */
static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&spinlock_stats, 0, sizeof(spinlock_stats));
		zero_stats = 0;
	}
}

/* Bump a statistics counter, honouring any pending reset first. */
#define ADD_STATS(elem, val)			\
	do { check_zero(); spinlock_stats.elem += (val); } while(0)
|  | 57 |  | 
|  | 58 | static inline u64 spin_time_start(void) | 
|  | 59 | { | 
|  | 60 | return xen_clocksource_read(); | 
|  | 61 | } | 
|  | 62 |  | 
|  | 63 | static void __spin_time_accum(u64 delta, u32 *array) | 
|  | 64 | { | 
|  | 65 | unsigned index = ilog2(delta); | 
|  | 66 |  | 
|  | 67 | check_zero(); | 
|  | 68 |  | 
|  | 69 | if (index < HISTO_BUCKETS) | 
|  | 70 | array[index]++; | 
|  | 71 | else | 
|  | 72 | array[HISTO_BUCKETS]++; | 
|  | 73 | } | 
|  | 74 |  | 
| Jeremy Fitzhardinge | f8eca41 | 2008-08-20 17:02:21 -0700 | [diff] [blame] | 75 | static inline void spin_time_accum_spinning(u64 start) | 
| Jeremy Fitzhardinge | 994025c | 2008-08-20 17:02:19 -0700 | [diff] [blame] | 76 | { | 
|  | 77 | u32 delta = xen_clocksource_read() - start; | 
|  | 78 |  | 
| Jeremy Fitzhardinge | f8eca41 | 2008-08-20 17:02:21 -0700 | [diff] [blame] | 79 | __spin_time_accum(delta, spinlock_stats.histo_spin_spinning); | 
|  | 80 | spinlock_stats.time_spinning += delta; | 
| Jeremy Fitzhardinge | 994025c | 2008-08-20 17:02:19 -0700 | [diff] [blame] | 81 | } | 
|  | 82 |  | 
| Jeremy Fitzhardinge | f8eca41 | 2008-08-20 17:02:21 -0700 | [diff] [blame] | 83 | static inline void spin_time_accum_total(u64 start) | 
| Jeremy Fitzhardinge | 994025c | 2008-08-20 17:02:19 -0700 | [diff] [blame] | 84 | { | 
|  | 85 | u32 delta = xen_clocksource_read() - start; | 
|  | 86 |  | 
| Jeremy Fitzhardinge | f8eca41 | 2008-08-20 17:02:21 -0700 | [diff] [blame] | 87 | __spin_time_accum(delta, spinlock_stats.histo_spin_total); | 
|  | 88 | spinlock_stats.time_total += delta; | 
|  | 89 | } | 
|  | 90 |  | 
|  | 91 | static inline void spin_time_accum_blocked(u64 start) | 
|  | 92 | { | 
|  | 93 | u32 delta = xen_clocksource_read() - start; | 
|  | 94 |  | 
|  | 95 | __spin_time_accum(delta, spinlock_stats.histo_spin_blocked); | 
|  | 96 | spinlock_stats.time_blocked += delta; | 
| Jeremy Fitzhardinge | 994025c | 2008-08-20 17:02:19 -0700 | [diff] [blame] | 97 | } | 
#else  /* !CONFIG_XEN_DEBUG_FS */
/* No debugfs: fixed spin timeout and no-op statistics/timing hooks. */
#define TIMEOUT			(1 << 10)
#define ADD_STATS(elem, val)	do { (void)(val); } while(0)

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_total(u64 start)
{
}
static inline void spin_time_accum_spinning(u64 start)
{
}
static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_XEN_DEBUG_FS */
| Jeremy Fitzhardinge | d5de884 | 2008-07-23 13:28:58 -0700 | [diff] [blame] | 117 |  | 
/*
 * Xen's byte-spinlock layout; overlays the generic struct raw_spinlock
 * storage (callers cast between the two).
 */
struct xen_spinlock {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	unsigned short spinners;	/* count of waiting cpus */
};
|  | 122 |  | 
|  | 123 | static int xen_spin_is_locked(struct raw_spinlock *lock) | 
|  | 124 | { | 
|  | 125 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; | 
|  | 126 |  | 
|  | 127 | return xl->lock != 0; | 
|  | 128 | } | 
|  | 129 |  | 
|  | 130 | static int xen_spin_is_contended(struct raw_spinlock *lock) | 
|  | 131 | { | 
|  | 132 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; | 
|  | 133 |  | 
|  | 134 | /* Not strictly true; this is only the count of contended | 
|  | 135 | lock-takers entering the slow path. */ | 
|  | 136 | return xl->spinners != 0; | 
|  | 137 | } | 
|  | 138 |  | 
/*
 * pv-op: single attempt to take the lock.  Atomically exchanges 1 into
 * the lock byte; we got the lock iff the previous value was 0 (free).
 * xchg with a memory operand is implicitly locked on x86.
 */
static int xen_spin_trylock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %b0,%1"
	    : "+q" (old), "+m" (xl->lock) : : "memory");

	return old == 0;
}
|  | 149 |  | 
/* Per-cpu event-channel irq used to kick a blocked spinner awake. */
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
/* Lock this cpu is currently spinning on in the slow path, if any. */
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
|  | 152 |  | 
/*
 * Mark a cpu as interested in a lock.  Returns the CPU's previous
 * lock of interest, in case we got preempted by an interrupt.
 */
static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
{
	struct xen_spinlock *prev;

	/* Remember any lock we were already spinning on (nested-irq case). */
	prev = __get_cpu_var(lock_spinners);
	__get_cpu_var(lock_spinners) = xl;

	wmb();			/* set lock of interest before count */

	/* Atomically bump the waiter count that the unlocker examines. */
	asm(LOCK_PREFIX " incw %0"
	    : "+m" (xl->spinners) : : "memory");

	return prev;
}
|  | 171 |  | 
/*
 * Mark a cpu as no longer interested in a lock.  Restores previous
 * lock of interest (NULL for none).
 */
static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
{
	/* Atomically drop the waiter count first... */
	asm(LOCK_PREFIX " decw %0"
	    : "+m" (xl->spinners) : : "memory");
	wmb();			/* decrement count before restoring lock */
	/* ...then restore whichever lock we were spinning on before. */
	__get_cpu_var(lock_spinners) = prev;
}
|  | 183 |  | 
/*
 * Blocking slow path: instead of burning cpu, block on this cpu's
 * kicker event-channel irq until the lock holder wakes us via
 * xen_spin_unlock_slow().  Returns nonzero if the lock was acquired
 * here, 0 if the caller should go back to spinning.
 *
 * @irq_enable: re-enable interrupts while blocked, if the caller had
 * them enabled.
 */
static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	struct xen_spinlock *prev;
	int irq = __get_cpu_var(lock_kicker_irq);
	int ret;
	unsigned long flags;
	u64 start;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return 0;

	start = spin_time_start();

	/* announce we're spinning */
	prev = spinning_lock(xl);

	/* Save irq state so 'out:' restores it no matter what we enabled. */
	flags = __raw_local_save_flags();
	if (irq_enable) {
		ADD_STATS(taken_slow_irqenable, 1);
		raw_local_irq_enable();
	}

	ADD_STATS(taken_slow, 1);
	ADD_STATS(taken_slow_nested, prev != NULL);

	do {
		/* clear pending */
		xen_clear_irq_pending(irq);

		/* check again make sure it didn't become free while
		   we weren't looking  */
		ret = xen_spin_trylock(lock);
		if (ret) {
			ADD_STATS(taken_slow_pickup, 1);

			/*
			 * If we interrupted another spinlock while it
			 * was blocking, make sure it doesn't block
			 * without rechecking the lock.
			 */
			if (prev != NULL)
				xen_set_irq_pending(irq);
			goto out;
		}

		/*
		 * Block until irq becomes pending.  If we're
		 * interrupted at this point (after the trylock but
		 * before entering the block), then the nested lock
		 * handler guarantees that the irq will be left
		 * pending if there's any chance the lock became free;
		 * xen_poll_irq() returns immediately if the irq is
		 * pending.
		 */
		xen_poll_irq(irq);
		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */

	/* Count the wakeup as an interrupt on this cpu for /proc/interrupts. */
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));

out:
	raw_local_irq_restore(flags);
	unspinning_lock(xl, prev);
	spin_time_accum_blocked(start);

	return ret;
}
|  | 253 |  | 
/*
 * Common lock-acquisition path.  Fast path: spin on the lock byte for
 * up to TIMEOUT iterations; on timeout, fall into xen_spin_lock_slow(),
 * which blocks on an event channel until kicked by the unlocker.
 *
 * @irq_enable: allow the slow path to re-enable interrupts while blocked.
 */
static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	unsigned timeout;
	u8 oldval;
	u64 start_spin;

	ADD_STATS(taken, 1);

	start_spin = spin_time_start();

	do {
		u64 start_spin_fast = spin_time_start();

		timeout = TIMEOUT;

		/*
		 * 1: xchg 1 into the lock byte; old value 0 means we own
		 *    it (fall through to 3:).  Otherwise:
		 * 2: pause (rep;nop) and re-read until the byte goes 0,
		 *    then retry the xchg; give up after 'timeout' rounds.
		 */
		asm("1: xchgb %1,%0\n"
		    "   testb %1,%1\n"
		    "   jz 3f\n"
		    "2: rep;nop\n"
		    "   cmpb $0,%0\n"
		    "   je 1b\n"
		    "   dec %2\n"
		    "   jnz 2b\n"
		    "3:\n"
		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
		    : "1" (1)
		    : "memory");

		spin_time_accum_spinning(start_spin_fast);

		/*
		 * Keep looping while we failed to take the lock and either
		 * the slow path is disabled (TIMEOUT == ~0) or it woke up
		 * without acquiring the lock.
		 */
	} while (unlikely(oldval != 0 &&
			  (TIMEOUT == ~0 || !xen_spin_lock_slow(lock, irq_enable))));

	spin_time_accum_total(start_spin);
}
|  | 290 |  | 
/* pv-op: take the lock; never re-enables interrupts while waiting. */
static void xen_spin_lock(struct raw_spinlock *lock)
{
	__xen_spin_lock(lock, false);
}
|  | 295 |  | 
|  | 296 | static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags) | 
|  | 297 | { | 
|  | 298 | __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags)); | 
|  | 299 | } | 
|  | 300 |  | 
| Jeremy Fitzhardinge | d5de884 | 2008-07-23 13:28:58 -0700 | [diff] [blame] | 301 | static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl) | 
|  | 302 | { | 
|  | 303 | int cpu; | 
|  | 304 |  | 
| Jeremy Fitzhardinge | 994025c | 2008-08-20 17:02:19 -0700 | [diff] [blame] | 305 | ADD_STATS(released_slow, 1); | 
|  | 306 |  | 
| Jeremy Fitzhardinge | d5de884 | 2008-07-23 13:28:58 -0700 | [diff] [blame] | 307 | for_each_online_cpu(cpu) { | 
|  | 308 | /* XXX should mix up next cpu selection */ | 
|  | 309 | if (per_cpu(lock_spinners, cpu) == xl) { | 
| Jeremy Fitzhardinge | 994025c | 2008-08-20 17:02:19 -0700 | [diff] [blame] | 310 | ADD_STATS(released_slow_kicked, 1); | 
| Jeremy Fitzhardinge | d5de884 | 2008-07-23 13:28:58 -0700 | [diff] [blame] | 311 | xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); | 
|  | 312 | break; | 
|  | 313 | } | 
|  | 314 | } | 
|  | 315 | } | 
|  | 316 |  | 
/* pv-op: release the lock, kicking a slow-path waiter if one exists. */
static void xen_spin_unlock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	ADD_STATS(released, 1);

	smp_wmb();		/* make sure no writes get moved after unlock */
	xl->lock = 0;		/* release lock */

	/* make sure unlock happens before kick */
	barrier();

	/* Only take the kick path if someone announced they are waiting. */
	if (unlikely(xl->spinners))
		xen_spin_unlock_slow(xl);
}
|  | 332 |  | 
/*
 * Handler for the kicker irq.  The irq is kept disabled and is only
 * polled/cleared, so actual delivery here indicates a bug.
 */
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}
|  | 338 |  | 
|  | 339 | void __cpuinit xen_init_lock_cpu(int cpu) | 
|  | 340 | { | 
|  | 341 | int irq; | 
|  | 342 | const char *name; | 
|  | 343 |  | 
|  | 344 | name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); | 
|  | 345 | irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, | 
|  | 346 | cpu, | 
|  | 347 | dummy_handler, | 
|  | 348 | IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, | 
|  | 349 | name, | 
|  | 350 | NULL); | 
|  | 351 |  | 
|  | 352 | if (irq >= 0) { | 
|  | 353 | disable_irq(irq); /* make sure it's never delivered */ | 
|  | 354 | per_cpu(lock_kicker_irq, cpu) = irq; | 
|  | 355 | } | 
|  | 356 |  | 
|  | 357 | printk("cpu %d spinlock event irq %d\n", cpu, irq); | 
|  | 358 | } | 
|  | 359 |  | 
| Alex Nixon | d68d82a | 2008-08-22 11:52:15 +0100 | [diff] [blame] | 360 | void xen_uninit_lock_cpu(int cpu) | 
|  | 361 | { | 
|  | 362 | unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); | 
|  | 363 | } | 
|  | 364 |  | 
| Jeremy Fitzhardinge | d5de884 | 2008-07-23 13:28:58 -0700 | [diff] [blame] | 365 | void __init xen_init_spinlocks(void) | 
|  | 366 | { | 
|  | 367 | pv_lock_ops.spin_is_locked = xen_spin_is_locked; | 
|  | 368 | pv_lock_ops.spin_is_contended = xen_spin_is_contended; | 
|  | 369 | pv_lock_ops.spin_lock = xen_spin_lock; | 
| Jeremy Fitzhardinge | 1e696f6 | 2008-08-20 17:02:20 -0700 | [diff] [blame] | 370 | pv_lock_ops.spin_lock_flags = xen_spin_lock_flags; | 
| Jeremy Fitzhardinge | d5de884 | 2008-07-23 13:28:58 -0700 | [diff] [blame] | 371 | pv_lock_ops.spin_trylock = xen_spin_trylock; | 
|  | 372 | pv_lock_ops.spin_unlock = xen_spin_unlock; | 
|  | 373 | } | 
| Jeremy Fitzhardinge | 994025c | 2008-08-20 17:02:19 -0700 | [diff] [blame] | 374 |  | 
#ifdef CONFIG_XEN_DEBUG_FS

/* debugfs directory holding the spinlock statistics files. */
static struct dentry *d_spin_debug;

/*
 * Export the spinlock statistics under <xen debugfs root>/spinlocks.
 * "zero_stats" and "timeout" are writable knobs; everything else is a
 * read-only counter or histogram.
 */
static int __init xen_spinlock_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout);

	debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken);
	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow);
	debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_nested);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_pickup);
	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_spurious);
	debugfs_create_u32("taken_slow_irqenable", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_irqenable);

	debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.released_slow);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.released_slow_kicked);

	debugfs_create_u64("time_spinning", 0444, d_spin_debug,
			   &spinlock_stats.time_spinning);
	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);
	debugfs_create_u64("time_total", 0444, d_spin_debug,
			   &spinlock_stats.time_total);

	xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
				     spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
	xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
				     spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
	xen_debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				     spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(xen_spinlock_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */