/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if at any time in the past two locks were taken in a different
 * order, even if it happened for another task, even if those were
 * different locks (but of the same class as this lock), this code will
 * detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
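
/*
 * Illustrative sketch (not part of the validator): the classic AB-BA
 * ordering bug that the dependency graph catches. The locks and
 * functions below are hypothetical, for illustration only.
 */
#if 0
static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void thread_one(void)
{
	spin_lock(&lock_a);
	spin_lock(&lock_b);	/* records the dependency A -> B */
	spin_unlock(&lock_b);
	spin_unlock(&lock_a);
}

static void thread_two(void)
{
	spin_lock(&lock_b);
	spin_lock(&lock_a);	/* B -> A closes a cycle: reported even
				   if the two code paths never actually
				   race */
	spin_unlock(&lock_a);
	spin_unlock(&lock_b);
}
#endif
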
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

/*
 * hash_lock: protects the lockdep hashes and class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code.
 */
static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int lockdep_initialized;

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

/*
 * Allocate a lockdep entry. (Assumes hash_lock is held; returns
 * NULL on failure.)
 */
static struct lock_list *alloc_list_entry(void)
{
	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
		__raw_spin_unlock(&hash_lock);
		debug_locks_off();
		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		return NULL;
	}
	return list_entries + nr_list_entries++;
}

/*
 * All data structures here are protected by the global hash_lock.
 *
 * Mutex key structs only get allocated once, during bootup, and never
 * get freed - this significantly simplifies the debugging code.
 */
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock held.
 */
LIST_HEAD(all_lock_classes);

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define CLASSHASH_MASK		(CLASSHASH_SIZE - 1)
#define __classhashfn(key)	((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct list_head classhash_table[CLASSHASH_SIZE];

unsigned long nr_lock_chains;
static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define CHAINHASH_MASK		(CHAINHASH_SIZE - 1)
#define __chainhashfn(chain) \
		(((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct list_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
#define iterate_chain_key(key1, key2) \
	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
	(key2))
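
/*
 * A minimal sketch of how the chain key is built up as locks are
 * acquired (check_chain_key() below recomputes exactly this); the
 * class ids here are hypothetical:
 *
 *	u64 chain_key = 0;
 *
 *	chain_key = iterate_chain_key(chain_key, id_of_lock_A);
 *	chain_key = iterate_chain_key(chain_key, id_of_lock_B);
 *
 * chain_key now identifies the chain "A, then B" - taking the same
 * locks in the opposite order yields a different key.
 */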

void lockdep_off(void)
{
	current->lockdep_recursion++;
}

EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
	current->lockdep_recursion--;
}

EXPORT_SYMBOL(lockdep_on);

int lockdep_internal(void)
{
	return current->lockdep_recursion != 0;
}

EXPORT_SYMBOL(lockdep_internal);

/*
 * Debugging switches:
 */

#define VERBOSE			0
#ifdef VERBOSE
# define VERY_VERBOSE		0
#endif

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Allow everything else. Returning 0 here would filter
	   everything else out. */
	return 1;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

#ifdef CONFIG_TRACE_IRQFLAGS

static int hardirq_verbose(struct lock_class *class)
{
#if HARDIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static int softirq_verbose(struct lock_class *class)
{
#if SOFTIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

#endif

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static int save_trace(struct stack_trace *trace)
{
	trace->nr_entries = 0;
	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	trace->entries = stack_trace + nr_stack_trace_entries;

	trace->skip = 3;
	trace->all_contexts = 0;

	/* Make sure to not recurse in case the unwinder needs to take
	   locks. */
	lockdep_off();
	save_stack_trace(trace, NULL);
	lockdep_on();

	trace->max_entries = trace->nr_entries;

	nr_stack_trace_entries += trace->nr_entries;
	if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES))
		return 0;

	if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
		__raw_spin_unlock(&hash_lock);
		if (debug_locks_off()) {
			printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
			printk("turning off the locking correctness validator.\n");
			dump_stack();
		}
		return 0;
	}

	return 1;
}

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;
unsigned int max_recursion_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * We cannot printk in early bootup code. Not even early_printk()
 * might work. So we mark any initialization errors and printk
 * about it later on, in lockdep_info().
 */
static int lockdep_init_error;

/*
 * Various lockdep statistics:
 */
atomic_t chain_lookup_hits;
atomic_t chain_lookup_misses;
atomic_t hardirqs_on_events;
atomic_t hardirqs_off_events;
atomic_t redundant_hardirqs_on;
atomic_t redundant_hardirqs_off;
atomic_t softirqs_on_events;
atomic_t softirqs_off_events;
atomic_t redundant_softirqs_on;
atomic_t redundant_softirqs_off;
atomic_t nr_unused_locks;
atomic_t nr_cyclic_checks;
atomic_t nr_cyclic_check_recursions;
atomic_t nr_find_usage_forwards_checks;
atomic_t nr_find_usage_forwards_recursions;
atomic_t nr_find_usage_backwards_checks;
atomic_t nr_find_usage_backwards_recursions;
# define debug_atomic_inc(ptr)		atomic_inc(ptr)
# define debug_atomic_dec(ptr)		atomic_dec(ptr)
# define debug_atomic_read(ptr)		atomic_read(ptr)
#else
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif

/*
 * Locking printouts:
 */

static const char *usage_str[] =
{
	[LOCK_USED] =			"initial-use ",
	[LOCK_USED_IN_HARDIRQ] =	"in-hardirq-W",
	[LOCK_USED_IN_SOFTIRQ] =	"in-softirq-W",
	[LOCK_ENABLED_SOFTIRQS] =	"softirq-on-W",
	[LOCK_ENABLED_HARDIRQS] =	"hardirq-on-W",
	[LOCK_USED_IN_HARDIRQ_READ] =	"in-hardirq-R",
	[LOCK_USED_IN_SOFTIRQ_READ] =	"in-softirq-R",
	[LOCK_ENABLED_SOFTIRQS_READ] =	"softirq-on-R",
	[LOCK_ENABLED_HARDIRQS_READ] =	"hardirq-on-R",
};

const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
	unsigned long offs, size;
	char *modname;

	return kallsyms_lookup((unsigned long)key, &size, &offs, &modname, str);
}

void
get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
{
	*c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';

	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
		*c1 = '+';
	else
		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
			*c1 = '-';

	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
		*c2 = '+';
	else
		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
			*c2 = '-';

	if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
		*c3 = '-';
	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
		*c3 = '+';
		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
			*c3 = '?';
	}

	if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
		*c4 = '-';
	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
		*c4 = '+';
		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
			*c4 = '?';
	}
}
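
/*
 * How to read the {c1c2c3c4} usage characters that print_lock_name()
 * below emits: c1/c2 are the hardirq/softirq write-usage columns,
 * c3/c4 the corresponding read-usage columns. '+' means the lock was
 * ever used in that irq context, '-' means that irq type was ever
 * enabled while the lock was held, '?' (read columns only) means both,
 * and '.' means neither.
 */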

static void print_lock_name(struct lock_class *class)
{
	char str[128], c1, c2, c3, c4;
	const char *name;

	get_usage_chars(class, &c1, &c2, &c3, &c4);

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(" (%s", name);
	} else {
		printk(" (%s", name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		if (class->subclass)
			printk("/%d", class->subclass);
	}
	printk("){%c%c%c%c}", c1, c2, c3, c4);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[128];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk("%s", name);
}

static void print_lock(struct held_lock *hlock)
{
	print_lock_name(hlock->class);
	printk(", at: ");
	print_ip_sym(hlock->acquire_ip);
}

static void lockdep_print_held_locks(struct task_struct *curr)
{
	int i, depth = curr->lockdep_depth;

	if (!depth) {
		printk("no locks held by %s/%d.\n", curr->comm, curr->pid);
		return;
	}
	printk("%d lock%s held by %s/%d:\n",
		depth, depth > 1 ? "s" : "", curr->comm, curr->pid);

	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(curr->held_locks + i);
	}
}

static void print_lock_class_header(struct lock_class *class, int depth)
{
	int bit;

	printk("%*s->", depth, "");
	print_lock_name(class);
	printk(" ops: %lu", class->ops);
	printk(" {\n");

	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
		if (class->usage_mask & (1 << bit)) {
			int len = depth;

			len += printk("%*s   %s", depth, "", usage_str[bit]);
			len += printk(" at:\n");
			print_stack_trace(class->usage_traces + bit, len);
		}
	}
	printk("%*s }\n", depth, "");

	printk("%*s ... key      at: ", depth, "");
	print_ip_sym((unsigned long)class->key);
}

/*
 * printk all lock dependencies starting at <class>:
 */
static void print_lock_dependencies(struct lock_class *class, int depth)
{
	struct lock_list *entry;

	if (DEBUG_LOCKS_WARN_ON(depth >= 20))
		return;

	print_lock_class_header(class, depth);

	list_for_each_entry(entry, &class->locks_after, entry) {
		DEBUG_LOCKS_WARN_ON(!entry->class);
		print_lock_dependencies(entry->class, depth + 1);

		printk("%*s ... acquired at:\n", depth, "");
		print_stack_trace(&entry->trace, 2);
		printk("\n");
	}
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
			    struct list_head *head, unsigned long ip)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	save_trace(&entry->trace);

	/*
	 * Since we never remove from the dependency list, the list can
	 * be walked lockless by other CPUs, it's only allocation
	 * that must be protected by the spinlock. But this also means
	 * we must make new entries visible only once writes to the
	 * entry become visible - hence the RCU op:
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}
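
/*
 * An explanatory aside on the ordering above: list_add_tail_rcu()
 * carries the write barrier that publishes entry->class and
 * entry->trace before the entry is linked in, so a CPU walking the
 * list without the lock (as the recursive graph walks below do)
 * either sees the fully-initialized entry or does not see it at all.
 */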

/*
 * Recursive, forwards-direction lock-dependency checking, used for
 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
 * checking.
 *
 * (to keep the stackframe of the recursive functions small we
 *  use these global variables, and we also mark various helper
 *  functions as noinline.)
 */
static struct held_lock *check_source, *check_target;

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline int
print_circular_bug_entry(struct lock_list *target, unsigned int depth)
{
	if (debug_locks_silent)
		return 0;
	printk("\n-> #%u", depth);
	print_lock_name(target->class);
	printk(":\n");
	print_stack_trace(&target->trace, 6);

	return 0;
}

static void print_kernel_version(void)
{
	printk("%s %.*s\n", system_utsname.release,
		(int)strcspn(system_utsname.version, " "),
		system_utsname.version);
}

/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline int
print_circular_bug_header(struct lock_list *entry, unsigned int depth)
{
	struct task_struct *curr = current;

	__raw_spin_unlock(&hash_lock);
	debug_locks_off();
	if (debug_locks_silent)
		return 0;

	printk("\n=======================================================\n");
	printk(  "[ INFO: possible circular locking dependency detected ]\n");
	print_kernel_version();
	printk(  "-------------------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, curr->pid);
	print_lock(check_source);
	printk("\nbut task is already holding lock:\n");
	print_lock(check_target);
	printk("\nwhich lock already depends on the new lock.\n\n");
	printk("\nthe existing dependency chain (in reverse order) is:\n");

	print_circular_bug_entry(entry, depth);

	return 0;
}

static noinline int print_circular_bug_tail(void)
{
	struct task_struct *curr = current;
	struct lock_list this;

	if (debug_locks_silent)
		return 0;

	this.class = check_source->class;
	save_trace(&this.trace);
	print_circular_bug_entry(&this, 0);

	printk("\nother info that might help us debug this:\n\n");
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static int noinline print_infinite_recursion_bug(void)
{
	__raw_spin_unlock(&hash_lock);
	DEBUG_LOCKS_WARN_ON(1);

	return 0;
}

/*
 * Prove that the dependency graph starting at <source> can not
 * lead to <target>. Print an error and return 0 if it does.
 */
static noinline int
check_noncircular(struct lock_class *source, unsigned int depth)
{
	struct lock_list *entry;

	debug_atomic_inc(&nr_cyclic_check_recursions);
	if (depth > max_recursion_depth)
		max_recursion_depth = depth;
	if (depth >= 20)
		return print_infinite_recursion_bug();
	/*
	 * Check this lock's dependency list:
	 */
	list_for_each_entry(entry, &source->locks_after, entry) {
		if (entry->class == check_target->class)
			return print_circular_bug_header(entry, depth+1);
		debug_atomic_inc(&nr_cyclic_checks);
		if (!check_noncircular(entry->class, depth+1))
			return print_circular_bug_entry(entry, depth+1);
	}
	return 1;
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

#ifdef CONFIG_TRACE_IRQFLAGS

/*
 * Forwards and backwards subgraph searching, for the purposes of
 * proving that two subgraphs can be connected by a new dependency
 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 */
static enum lock_usage_bit find_usage_bit;
static struct lock_class *forwards_match, *backwards_match;

/*
 * Find a node in the forwards-direction dependency sub-graph starting
 * at <source> that matches <find_usage_bit>.
 *
 * Return 2 if such a node exists in the subgraph, and put that node
 * into <forwards_match>.
 *
 * Return 1 otherwise and keep <forwards_match> unchanged.
 * Return 0 on error.
 */
static noinline int
find_usage_forwards(struct lock_class *source, unsigned int depth)
{
	struct lock_list *entry;
	int ret;

	if (depth > max_recursion_depth)
		max_recursion_depth = depth;
	if (depth >= 20)
		return print_infinite_recursion_bug();

	debug_atomic_inc(&nr_find_usage_forwards_checks);
	if (source->usage_mask & (1 << find_usage_bit)) {
		forwards_match = source;
		return 2;
	}

	/*
	 * Check this lock's dependency list:
	 */
	list_for_each_entry(entry, &source->locks_after, entry) {
		debug_atomic_inc(&nr_find_usage_forwards_recursions);
		ret = find_usage_forwards(entry->class, depth+1);
		if (ret == 2 || ret == 0)
			return ret;
	}
	return 1;
}

/*
 * Find a node in the backwards-direction dependency sub-graph starting
 * at <source> that matches <find_usage_bit>.
 *
 * Return 2 if such a node exists in the subgraph, and put that node
 * into <backwards_match>.
 *
 * Return 1 otherwise and keep <backwards_match> unchanged.
 * Return 0 on error.
 */
static noinline int
find_usage_backwards(struct lock_class *source, unsigned int depth)
{
	struct lock_list *entry;
	int ret;

	if (depth > max_recursion_depth)
		max_recursion_depth = depth;
	if (depth >= 20)
		return print_infinite_recursion_bug();

	debug_atomic_inc(&nr_find_usage_backwards_checks);
	if (source->usage_mask & (1 << find_usage_bit)) {
		backwards_match = source;
		return 2;
	}

	/*
	 * Check this lock's dependency list:
	 */
	list_for_each_entry(entry, &source->locks_before, entry) {
		debug_atomic_inc(&nr_find_usage_backwards_recursions);
		ret = find_usage_backwards(entry->class, depth+1);
		if (ret == 2 || ret == 0)
			return ret;
	}
	return 1;
}

static int
print_bad_irq_dependency(struct task_struct *curr,
			 struct held_lock *prev,
			 struct held_lock *next,
			 enum lock_usage_bit bit1,
			 enum lock_usage_bit bit2,
			 const char *irqclass)
{
	__raw_spin_unlock(&hash_lock);
	debug_locks_off();
	if (debug_locks_silent)
		return 0;

	printk("\n======================================================\n");
	printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
		irqclass, irqclass);
	print_kernel_version();
	printk(  "------------------------------------------------------\n");
	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
		curr->comm, curr->pid,
		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
		curr->hardirqs_enabled,
		curr->softirqs_enabled);
	print_lock(next);

	printk("\nand this task is already holding:\n");
	print_lock(prev);
	printk("which would create a new lock dependency:\n");
	print_lock_name(prev->class);
	printk(" ->");
	print_lock_name(next->class);
	printk("\n");

	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
		irqclass);
	print_lock_name(backwards_match);
	printk("\n... which became %s-irq-safe at:\n", irqclass);

	print_stack_trace(backwards_match->usage_traces + bit1, 1);

	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
	print_lock_name(forwards_match);
	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
	printk("...");

	print_stack_trace(forwards_match->usage_traces + bit2, 1);

	printk("\nother info that might help us debug this:\n\n");
	lockdep_print_held_locks(curr);

	printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
	print_lock_dependencies(backwards_match, 0);

	printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
	print_lock_dependencies(forwards_match, 0);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static int
check_usage(struct task_struct *curr, struct held_lock *prev,
	    struct held_lock *next, enum lock_usage_bit bit_backwards,
	    enum lock_usage_bit bit_forwards, const char *irqclass)
{
	int ret;

	find_usage_bit = bit_backwards;
	/* fills in <backwards_match> */
	ret = find_usage_backwards(prev->class, 0);
	if (!ret || ret == 1)
		return ret;

	find_usage_bit = bit_forwards;
	ret = find_usage_forwards(next->class, 0);
	if (!ret || ret == 1)
		return ret;
	/* ret == 2 */
	return print_bad_irq_dependency(curr, prev, next,
			bit_backwards, bit_forwards, irqclass);
}

#endif

static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
		   struct held_lock *next)
{
	debug_locks_off();
	__raw_spin_unlock(&hash_lock);
	if (debug_locks_silent)
		return 0;

	printk("\n=============================================\n");
	printk(  "[ INFO: possible recursive locking detected ]\n");
	print_kernel_version();
	printk(  "---------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, curr->pid);
	print_lock(next);
	printk("\nbut task is already holding lock:\n");
	print_lock(prev);

	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Check whether we are holding such a class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 */
static int
check_deadlock(struct task_struct *curr, struct held_lock *next,
	       struct lockdep_map *next_instance, int read)
{
	struct held_lock *prev;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		prev = curr->held_locks + i;
		if (prev->class != next->class)
			continue;
		/*
		 * Allow read-after-read recursion of the same
		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
		 */
		if ((read == 2) && prev->read)
			return 2;
		return print_deadlock_bug(curr, prev, next);
	}
	return 1;
}

/*
 * There was a chain-cache miss, and we are about to add a new dependency
 * to a previous lock. We recursively validate the following rules:
 *
 *  - would the adding of the <prev> -> <next> dependency create a
 *    circular dependency in the graph? [== circular deadlock]
 *
 *  - does the new prev->next dependency connect any hardirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with hardirq contexts]
 *
 *  - does the new prev->next dependency connect any softirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    softirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with softirq contexts]
 *
 * any of these scenarios could lead to a deadlock.
 *
 * Then if all the validations pass, we add the forwards and backwards
 * dependency.
 */
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
	       struct held_lock *next)
{
	struct lock_list *entry;
	int ret;

	/*
	 * Prove that the new <prev> -> <next> dependency would not
	 * create a circular dependency in the graph. (We do this by
	 * forward-recursing into the graph starting at <next>, and
	 * checking whether we can reach <prev>.)
	 *
	 * We are using global variables to control the recursion, to
	 * keep the stackframe size of the recursive functions low:
	 */
	check_source = next;
	check_target = prev;
	if (!(check_noncircular(next->class, 0)))
		return print_circular_bug_tail();

#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Prove that the new dependency does not connect a hardirq-safe
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
					LOCK_ENABLED_HARDIRQS, "hard"))
		return 0;

	/*
	 * Prove that the new dependency does not connect a hardirq-safe-read
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
					LOCK_ENABLED_HARDIRQS, "hard-read"))
		return 0;

	/*
	 * Prove that the new dependency does not connect a softirq-safe
	 * lock with a softirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
					LOCK_ENABLED_SOFTIRQS, "soft"))
		return 0;
	/*
	 * Prove that the new dependency does not connect a softirq-safe-read
	 * lock with a softirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
					LOCK_ENABLED_SOFTIRQS, "soft-read"))
		return 0;
#endif
	/*
	 * For recursive read-locks we do all the dependency checks,
	 * but we don't store read-triggered dependencies (only
	 * write-triggered dependencies). This ensures that only the
	 * write-side dependencies matter, and that if for example a
	 * write-lock never takes any other locks, then the reads are
	 * equivalent to a NOP.
	 */
	if (next->read == 2 || prev->read == 2)
		return 1;
	/*
	 * Is the <prev> -> <next> dependency already present?
	 *
	 * (this may occur even though this is a new chain: consider
	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
	 *  chains - the second one will be new, but L1 already has
	 *  L2 added to its dependency list, due to the first chain.)
	 */
	list_for_each_entry(entry, &prev->class->locks_after, entry) {
		if (entry->class == next->class)
			return 2;
	}

	/*
	 * Ok, all validations passed, add the new lock
	 * to the previous lock's dependency list:
	 */
	ret = add_lock_to_list(prev->class, next->class,
			       &prev->class->locks_after, next->acquire_ip);
	if (!ret)
		return 0;
	/*
	 * Return value of 2 signals 'dependency already added',
	 * in that case we don't have to add the backlink either.
	 */
	if (ret == 2)
		return 2;
	ret = add_lock_to_list(next->class, prev->class,
			       &next->class->locks_before, next->acquire_ip);

	/*
	 * Debugging printouts:
	 */
	if (verbose(prev->class) || verbose(next->class)) {
		__raw_spin_unlock(&hash_lock);
		printk("\n new dependency: ");
		print_lock_name(prev->class);
		printk(" => ");
		print_lock_name(next->class);
		printk("\n");
		dump_stack();
		__raw_spin_lock(&hash_lock);
	}
	return 1;
}

/*
 * Add the dependency to all directly-previous locks that are 'relevant'.
 * The ones that are relevant are (in increasing distance from curr):
 * all consecutive trylock entries and the final non-trylock entry - or
 * the end of this context's lock-chain - whichever comes first.
 */
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
	int depth = curr->lockdep_depth;
	struct held_lock *hlock;

	/*
	 * Debugging checks.
	 *
	 * Depth must not be zero for a non-head lock:
	 */
	if (!depth)
		goto out_bug;
	/*
	 * At least two relevant locks must exist for this
	 * to be a head:
	 */
	if (curr->held_locks[depth].irq_context !=
			curr->held_locks[depth-1].irq_context)
		goto out_bug;

	for (;;) {
		hlock = curr->held_locks + depth-1;
		/*
		 * Only non-recursive-read entries get new dependencies
		 * added:
		 */
		if (hlock->read != 2) {
			check_prev_add(curr, hlock, next);
			/*
			 * Stop after the first non-trylock entry,
			 * as non-trylock entries have added their
			 * own direct dependencies already, so this
			 * lock is connected to them indirectly:
			 */
			if (!hlock->trylock)
				break;
		}
		depth--;
		/*
		 * End of lock-stack?
		 */
		if (!depth)
			break;
		/*
		 * Stop the search if we cross into another context:
		 */
		if (curr->held_locks[depth].irq_context !=
				curr->held_locks[depth-1].irq_context)
			break;
	}
	return 1;
out_bug:
	__raw_spin_unlock(&hash_lock);
	DEBUG_LOCKS_WARN_ON(1);

	return 0;
}

/*
 * Is this the address of a static object:
 */
static int static_obj(void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;
#ifdef CONFIG_SMP
	int i;
#endif

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

#ifdef CONFIG_SMP
	/*
	 * percpu var?
	 */
	for_each_possible_cpu(i) {
		start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
		end   = (unsigned long) &__per_cpu_end   + per_cpu_offset(i);

		if ((addr >= start) && (addr < end))
			return 1;
	}
#endif

	/*
	 * module var?
	 */
	return is_module_address(addr);
}

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}

extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void);

/*
 * Look up a lock's (sub)class in the hash-table. Returns NULL if the
 * class has not been registered yet - in that case register_lock_class()
 * below will allocate it and add it to the hash.
 */
static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;

#ifdef CONFIG_DEBUG_LOCKDEP
	/*
	 * If the architecture calls into lockdep before initializing
	 * the hashes then we'll warn about it later. (we cannot printk
	 * right now)
	 */
	if (unlikely(!lockdep_initialized)) {
		lockdep_init();
		lockdep_init_error = 1;
	}
#endif

	/*
	 * Static locks do not have their class-keys yet - for them the key
	 * is the lock object itself:
	 */
	if (unlikely(!lock->key))
		lock->key = (void *)lock;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	if (sizeof(struct lock_class_key) > sizeof(struct lock_class))
		__error_too_big_MAX_LOCKDEP_SUBCLASSES();

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We can walk the hash lockfree, because the hash only
	 * grows, and we are careful when adding entries to the end:
	 */
	list_for_each_entry(class, hash_head, hash_entry)
		if (class->key == key)
			return class;

	return NULL;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
		return class;

	/*
	 * Debug-check: all keys must be persistent!
	 */
	if (!static_obj(lock->key)) {
		debug_locks_off();
		printk("INFO: trying to register non-static key.\n");
		printk("the code is fine but needs lockdep annotation.\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

	__raw_spin_lock(&hash_lock);
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
	list_for_each_entry(class, hash_head, hash_entry)
		if (class->key == key)
			goto out_unlock_set;
	/*
	 * Allocate a new key from the static array, and add it to
	 * the hash:
	 */
	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
		__raw_spin_unlock(&hash_lock);
		debug_locks_off();
		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
		printk("turning off the locking correctness validator.\n");
		return NULL;
	}
	class = lock_classes + nr_lock_classes++;
	debug_atomic_inc(&nr_unused_locks);
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	INIT_LIST_HEAD(&class->lock_entry);
	INIT_LIST_HEAD(&class->locks_before);
	INIT_LIST_HEAD(&class->locks_after);
	class->name_version = count_matching_names(class);
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	list_add_tail_rcu(&class->hash_entry, hash_head);

	if (verbose(class)) {
		__raw_spin_unlock(&hash_lock);
		printk("\nnew class %p: %s", class->key, class->name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		printk("\n");
		dump_stack();
		__raw_spin_lock(&hash_lock);
	}
out_unlock_set:
	__raw_spin_unlock(&hash_lock);

	if (!subclass)
		lock->class_cache = class;

	DEBUG_LOCKS_WARN_ON(class->subclass != subclass);

	return class;
}

/*
 * Look up a dependency chain. If the key is not present yet then
 * add it and return 1 - in this case the new dependency chain will
 * be validated. If the key is already hashed, return 0 (or 1 in the
 * CONFIG_DEBUG_LOCKDEP case, to force redundant re-validation).
 */
static inline int lookup_chain_cache(u64 chain_key)
{
	struct list_head *hash_head = chainhashentry(chain_key);
	struct lock_chain *chain;

	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
	/*
	 * We can walk it lock-free, because entries only get added
	 * to the hash:
	 */
	list_for_each_entry(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
cache_hit:
			debug_atomic_inc(&chain_lookup_hits);
			/*
			 * In the debugging case, force redundant checking
			 * by returning 1:
			 */
#ifdef CONFIG_DEBUG_LOCKDEP
			__raw_spin_lock(&hash_lock);
			return 1;
#endif
			return 0;
		}
	}
	/*
	 * Allocate a new chain entry from the static array, and add
	 * it to the hash:
	 */
	__raw_spin_lock(&hash_lock);
	/*
	 * We have to walk the chain again locked - to avoid duplicates:
	 */
	list_for_each_entry(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
			__raw_spin_unlock(&hash_lock);
			goto cache_hit;
		}
	}
	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
		__raw_spin_unlock(&hash_lock);
		debug_locks_off();
		printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
		printk("turning off the locking correctness validator.\n");
		return 0;
	}
	chain = lock_chains + nr_lock_chains++;
	chain->chain_key = chain_key;
	list_add_tail_rcu(&chain->entry, hash_head);
	debug_atomic_inc(&chain_lookup_misses);
#ifdef CONFIG_TRACE_IRQFLAGS
	if (current->hardirq_context)
		nr_hardirq_chains++;
	else {
		if (current->softirq_context)
			nr_softirq_chains++;
		else
			nr_process_chains++;
	}
#else
	nr_process_chains++;
#endif

	return 1;
}

|  | 1331 | /* | 
|  | 1332 | * We are building curr_chain_key incrementally, so double-check | 
|  | 1333 | * it from scratch, to make sure that it's done correctly: | 
|  | 1334 | */ | 
|  | 1335 | static void check_chain_key(struct task_struct *curr) | 
|  | 1336 | { | 
|  | 1337 | #ifdef CONFIG_DEBUG_LOCKDEP | 
|  | 1338 | struct held_lock *hlock, *prev_hlock = NULL; | 
|  | 1339 | unsigned int i, id; | 
|  | 1340 | u64 chain_key = 0; | 
|  | 1341 |  | 
|  | 1342 | for (i = 0; i < curr->lockdep_depth; i++) { | 
|  | 1343 | hlock = curr->held_locks + i; | 
|  | 1344 | if (chain_key != hlock->prev_chain_key) { | 
|  | 1345 | debug_locks_off(); | 
|  | 1346 | printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n", | 
|  | 1347 | curr->lockdep_depth, i, | 
|  | 1348 | (unsigned long long)chain_key, | 
|  | 1349 | (unsigned long long)hlock->prev_chain_key); | 
|  | 1350 | WARN_ON(1); | 
|  | 1351 | return; | 
|  | 1352 | } | 
|  | 1353 | id = hlock->class - lock_classes; | 
|  | 1354 | DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS); | 
|  | 1355 | if (prev_hlock && (prev_hlock->irq_context != | 
|  | 1356 | hlock->irq_context)) | 
|  | 1357 | chain_key = 0; | 
|  | 1358 | chain_key = iterate_chain_key(chain_key, id); | 
|  | 1359 | prev_hlock = hlock; | 
|  | 1360 | } | 
|  | 1361 | if (chain_key != curr->curr_chain_key) { | 
|  | 1362 | debug_locks_off(); | 
|  | 1363 | printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n", | 
|  | 1364 | curr->lockdep_depth, i, | 
|  | 1365 | (unsigned long long)chain_key, | 
|  | 1366 | (unsigned long long)curr->curr_chain_key); | 
|  | 1367 | WARN_ON(1); | 
|  | 1368 | } | 
|  | 1369 | #endif | 
|  | 1370 | } | 
|  | 1371 |  | 
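|  |  | /* | 
|  |  | * Editor's aside - what the from-scratch recomputation above mirrors. | 
|  |  | * For a stack of held classes with ids id0, id1, id2 (and no | 
|  |  | * irq-context crossing), both the incremental and the from-scratch | 
|  |  | * computation reduce to: | 
|  |  | * | 
|  |  | *   key = iterate_chain_key(0, id0); | 
|  |  | *   key = iterate_chain_key(key, id1); | 
|  |  | *   key = iterate_chain_key(key, id2); | 
|  |  | * | 
|  |  | * so the same classes, taken in the same order, always hash to the | 
|  |  | * same 64-bit chain key - which is what makes the chain cache valid. | 
|  |  | */ | 
|  |  |  | 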
|  | 1372 | #ifdef CONFIG_TRACE_IRQFLAGS | 
|  | 1373 |  | 
|  | 1374 | /* | 
|  | 1375 | * print irq inversion bug: | 
|  | 1376 | */ | 
|  | 1377 | static int | 
|  | 1378 | print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, | 
|  | 1379 | struct held_lock *this, int forwards, | 
|  | 1380 | const char *irqclass) | 
|  | 1381 | { | 
|  | 1382 | __raw_spin_unlock(&hash_lock); | 
|  | 1383 | debug_locks_off(); | 
|  | 1384 | if (debug_locks_silent) | 
|  | 1385 | return 0; | 
|  | 1386 |  | 
|  | 1387 | printk("\n=========================================================\n"); | 
|  | 1388 | printk(  "[ INFO: possible irq lock inversion dependency detected ]\n"); | 
| Dave Jones | 99de055 | 2006-09-29 02:00:10 -0700 | [diff] [blame] | 1389 | print_kernel_version(); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1390 | printk(  "---------------------------------------------------------\n"); | 
|  | 1391 | printk("%s/%d just changed the state of lock:\n", | 
|  | 1392 | curr->comm, curr->pid); | 
|  | 1393 | print_lock(this); | 
|  | 1394 | if (forwards) | 
|  | 1395 | printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass); | 
|  | 1396 | else | 
|  | 1397 | printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass); | 
|  | 1398 | print_lock_name(other); | 
|  | 1399 | printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); | 
|  | 1400 |  | 
|  | 1401 | printk("\nother info that might help us debug this:\n"); | 
|  | 1402 | lockdep_print_held_locks(curr); | 
|  | 1403 |  | 
|  | 1404 | printk("\nthe first lock's dependencies:\n"); | 
|  | 1405 | print_lock_dependencies(this->class, 0); | 
|  | 1406 |  | 
|  | 1407 | printk("\nthe second lock's dependencies:\n"); | 
|  | 1408 | print_lock_dependencies(other, 0); | 
|  | 1409 |  | 
|  | 1410 | printk("\nstack backtrace:\n"); | 
|  | 1411 | dump_stack(); | 
|  | 1412 |  | 
|  | 1413 | return 0; | 
|  | 1414 | } | 
|  | 1415 |  | 
|  | 1416 | /* | 
|  | 1417 | * Prove that in the forwards-direction subgraph starting at <this> | 
|  | 1418 | * there is no lock matching <bit>: | 
|  | 1419 | */ | 
|  | 1420 | static int | 
|  | 1421 | check_usage_forwards(struct task_struct *curr, struct held_lock *this, | 
|  | 1422 | enum lock_usage_bit bit, const char *irqclass) | 
|  | 1423 | { | 
|  | 1424 | int ret; | 
|  | 1425 |  | 
|  | 1426 | find_usage_bit = bit; | 
|  | 1427 | /* fills in <forwards_match> */ | 
|  | 1428 | ret = find_usage_forwards(this->class, 0); | 
|  | 1429 | if (!ret || ret == 1) | 
|  | 1430 | return ret; | 
|  | 1431 |  | 
|  | 1432 | return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass); | 
|  | 1433 | } | 
|  | 1434 |  | 
|  | 1435 | /* | 
|  | 1436 | * Prove that in the backwards-direction subgraph starting at <this> | 
|  | 1437 | * there is no lock matching <bit>: | 
|  | 1438 | */ | 
|  | 1439 | static int | 
|  | 1440 | check_usage_backwards(struct task_struct *curr, struct held_lock *this, | 
|  | 1441 | enum lock_usage_bit bit, const char *irqclass) | 
|  | 1442 | { | 
|  | 1443 | int ret; | 
|  | 1444 |  | 
|  | 1445 | find_usage_bit = bit; | 
|  | 1446 | /* fills in <backwards_match> */ | 
|  | 1447 | ret = find_usage_backwards(this->class, 0); | 
|  | 1448 | if (!ret || ret == 1) | 
|  | 1449 | return ret; | 
|  | 1450 |  | 
|  | 1451 | return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass); | 
|  | 1452 | } | 
|  | 1453 |  | 
|  | 1454 | static inline void print_irqtrace_events(struct task_struct *curr) | 
|  | 1455 | { | 
|  | 1456 | printk("irq event stamp: %u\n", curr->irq_events); | 
|  | 1457 | printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event); | 
|  | 1458 | print_ip_sym(curr->hardirq_enable_ip); | 
|  | 1459 | printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event); | 
|  | 1460 | print_ip_sym(curr->hardirq_disable_ip); | 
|  | 1461 | printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event); | 
|  | 1462 | print_ip_sym(curr->softirq_enable_ip); | 
|  | 1463 | printk("softirqs last disabled at (%u): ", curr->softirq_disable_event); | 
|  | 1464 | print_ip_sym(curr->softirq_disable_ip); | 
|  | 1465 | } | 
|  | 1466 |  | 
|  | 1467 | #else | 
|  | 1468 | static inline void print_irqtrace_events(struct task_struct *curr) | 
|  | 1469 | { | 
|  | 1470 | } | 
|  | 1471 | #endif | 
|  | 1472 |  | 
|  | 1473 | static int | 
|  | 1474 | print_usage_bug(struct task_struct *curr, struct held_lock *this, | 
|  | 1475 | enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) | 
|  | 1476 | { | 
|  | 1477 | __raw_spin_unlock(&hash_lock); | 
|  | 1478 | debug_locks_off(); | 
|  | 1479 | if (debug_locks_silent) | 
|  | 1480 | return 0; | 
|  | 1481 |  | 
|  | 1482 | printk("\n=================================\n"); | 
|  | 1483 | printk(  "[ INFO: inconsistent lock state ]\n"); | 
| Dave Jones | 99de055 | 2006-09-29 02:00:10 -0700 | [diff] [blame] | 1484 | print_kernel_version(); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1485 | printk(  "---------------------------------\n"); | 
|  | 1486 |  | 
|  | 1487 | printk("inconsistent {%s} -> {%s} usage.\n", | 
|  | 1488 | usage_str[prev_bit], usage_str[new_bit]); | 
|  | 1489 |  | 
|  | 1490 | printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", | 
|  | 1491 | curr->comm, curr->pid, | 
|  | 1492 | trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, | 
|  | 1493 | trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, | 
|  | 1494 | trace_hardirqs_enabled(curr), | 
|  | 1495 | trace_softirqs_enabled(curr)); | 
|  | 1496 | print_lock(this); | 
|  | 1497 |  | 
|  | 1498 | printk("{%s} state was registered at:\n", usage_str[prev_bit]); | 
|  | 1499 | print_stack_trace(this->class->usage_traces + prev_bit, 1); | 
|  | 1500 |  | 
|  | 1501 | print_irqtrace_events(curr); | 
|  | 1502 | printk("\nother info that might help us debug this:\n"); | 
|  | 1503 | lockdep_print_held_locks(curr); | 
|  | 1504 |  | 
|  | 1505 | printk("\nstack backtrace:\n"); | 
|  | 1506 | dump_stack(); | 
|  | 1507 |  | 
|  | 1508 | return 0; | 
|  | 1509 | } | 
|  | 1510 |  | 
|  | 1511 | /* | 
|  | 1512 | * Print out an error if an invalid bit is set: | 
|  | 1513 | */ | 
|  | 1514 | static inline int | 
|  | 1515 | valid_state(struct task_struct *curr, struct held_lock *this, | 
|  | 1516 | enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) | 
|  | 1517 | { | 
|  | 1518 | if (unlikely(this->class->usage_mask & (1 << bad_bit))) | 
|  | 1519 | return print_usage_bug(curr, this, bad_bit, new_bit); | 
|  | 1520 | return 1; | 
|  | 1521 | } | 
|  | 1522 |  | 
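|  |  | /* | 
|  |  | * Editor's aside - the classic inconsistency that valid_state() | 
|  |  | * catches, as a sketch (my_lock is hypothetical): | 
|  |  | * | 
|  |  | *   spin_lock(&my_lock);     <- hardirqs enabled here, so the class | 
|  |  | *                               gets LOCK_ENABLED_HARDIRQS | 
|  |  | *   ... | 
|  |  | *   <hardirq handler> | 
|  |  | *     spin_lock(&my_lock);   <- the class now gets LOCK_USED_IN_HARDIRQ | 
|  |  | * | 
|  |  | * The two bits conflict: the hardirq could have hit while my_lock was | 
|  |  | * already held, and the handler would then spin forever on the same | 
|  |  | * CPU - so the bug is reported even if that timing never occurred. | 
|  |  | */ | 
|  |  |  | 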
|  | 1523 | #define STRICT_READ_CHECKS	1 | 
|  | 1524 |  | 
|  | 1525 | /* | 
|  | 1526 | * Mark a lock with a usage bit, and validate the state transition: | 
|  | 1527 | */ | 
|  | 1528 | static int mark_lock(struct task_struct *curr, struct held_lock *this, | 
|  | 1529 | enum lock_usage_bit new_bit, unsigned long ip) | 
|  | 1530 | { | 
|  | 1531 | unsigned int new_mask = 1 << new_bit, ret = 1; | 
|  | 1532 |  | 
|  | 1533 | /* | 
|  | 1534 | * If already set then do not dirty the cacheline, | 
|  | 1535 | * nor do any checks: | 
|  | 1536 | */ | 
|  | 1537 | if (likely(this->class->usage_mask & new_mask)) | 
|  | 1538 | return 1; | 
|  | 1539 |  | 
|  | 1540 | __raw_spin_lock(&hash_lock); | 
|  | 1541 | /* | 
|  | 1542 | * Make sure we didn't race: | 
|  | 1543 | */ | 
|  | 1544 | if (unlikely(this->class->usage_mask & new_mask)) { | 
|  | 1545 | __raw_spin_unlock(&hash_lock); | 
|  | 1546 | return 1; | 
|  | 1547 | } | 
|  | 1548 |  | 
|  | 1549 | this->class->usage_mask |= new_mask; | 
|  | 1550 |  | 
|  | 1551 | #ifdef CONFIG_TRACE_IRQFLAGS | 
|  | 1552 | if (new_bit == LOCK_ENABLED_HARDIRQS || | 
|  | 1553 | new_bit == LOCK_ENABLED_HARDIRQS_READ) | 
|  | 1554 | ip = curr->hardirq_enable_ip; | 
|  | 1555 | else if (new_bit == LOCK_ENABLED_SOFTIRQS || | 
|  | 1556 | new_bit == LOCK_ENABLED_SOFTIRQS_READ) | 
|  | 1557 | ip = curr->softirq_enable_ip; | 
|  | 1558 | #endif | 
|  | 1559 | if (!save_trace(this->class->usage_traces + new_bit)) | 
|  | 1560 | return 0; | 
|  | 1561 |  | 
|  | 1562 | switch (new_bit) { | 
|  | 1563 | #ifdef CONFIG_TRACE_IRQFLAGS | 
|  | 1564 | case LOCK_USED_IN_HARDIRQ: | 
|  | 1565 | if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS)) | 
|  | 1566 | return 0; | 
|  | 1567 | if (!valid_state(curr, this, new_bit, | 
|  | 1568 | LOCK_ENABLED_HARDIRQS_READ)) | 
|  | 1569 | return 0; | 
|  | 1570 | /* | 
|  | 1571 | * just marked it hardirq-safe, check that this lock | 
|  | 1572 | * took no hardirq-unsafe lock in the past: | 
|  | 1573 | */ | 
|  | 1574 | if (!check_usage_forwards(curr, this, | 
|  | 1575 | LOCK_ENABLED_HARDIRQS, "hard")) | 
|  | 1576 | return 0; | 
|  | 1577 | #if STRICT_READ_CHECKS | 
|  | 1578 | /* | 
|  | 1579 | * just marked it hardirq-safe, check that this lock | 
|  | 1580 | * took no hardirq-unsafe-read lock in the past: | 
|  | 1581 | */ | 
|  | 1582 | if (!check_usage_forwards(curr, this, | 
|  | 1583 | LOCK_ENABLED_HARDIRQS_READ, "hard-read")) | 
|  | 1584 | return 0; | 
|  | 1585 | #endif | 
|  | 1586 | if (hardirq_verbose(this->class)) | 
|  | 1587 | ret = 2; | 
|  | 1588 | break; | 
|  | 1589 | case LOCK_USED_IN_SOFTIRQ: | 
|  | 1590 | if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS)) | 
|  | 1591 | return 0; | 
|  | 1592 | if (!valid_state(curr, this, new_bit, | 
|  | 1593 | LOCK_ENABLED_SOFTIRQS_READ)) | 
|  | 1594 | return 0; | 
|  | 1595 | /* | 
|  | 1596 | * just marked it softirq-safe, check that this lock | 
|  | 1597 | * took no softirq-unsafe lock in the past: | 
|  | 1598 | */ | 
|  | 1599 | if (!check_usage_forwards(curr, this, | 
|  | 1600 | LOCK_ENABLED_SOFTIRQS, "soft")) | 
|  | 1601 | return 0; | 
|  | 1602 | #if STRICT_READ_CHECKS | 
|  | 1603 | /* | 
|  | 1604 | * just marked it softirq-safe, check that this lock | 
|  | 1605 | * took no softirq-unsafe-read lock in the past: | 
|  | 1606 | */ | 
|  | 1607 | if (!check_usage_forwards(curr, this, | 
|  | 1608 | LOCK_ENABLED_SOFTIRQS_READ, "soft-read")) | 
|  | 1609 | return 0; | 
|  | 1610 | #endif | 
|  | 1611 | if (softirq_verbose(this->class)) | 
|  | 1612 | ret = 2; | 
|  | 1613 | break; | 
|  | 1614 | case LOCK_USED_IN_HARDIRQ_READ: | 
|  | 1615 | if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS)) | 
|  | 1616 | return 0; | 
|  | 1617 | /* | 
|  | 1618 | * just marked it hardirq-read-safe, check that this lock | 
|  | 1619 | * took no hardirq-unsafe lock in the past: | 
|  | 1620 | */ | 
|  | 1621 | if (!check_usage_forwards(curr, this, | 
|  | 1622 | LOCK_ENABLED_HARDIRQS, "hard")) | 
|  | 1623 | return 0; | 
|  | 1624 | if (hardirq_verbose(this->class)) | 
|  | 1625 | ret = 2; | 
|  | 1626 | break; | 
|  | 1627 | case LOCK_USED_IN_SOFTIRQ_READ: | 
|  | 1628 | if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS)) | 
|  | 1629 | return 0; | 
|  | 1630 | /* | 
|  | 1631 | * just marked it softirq-read-safe, check that this lock | 
|  | 1632 | * took no softirq-unsafe lock in the past: | 
|  | 1633 | */ | 
|  | 1634 | if (!check_usage_forwards(curr, this, | 
|  | 1635 | LOCK_ENABLED_SOFTIRQS, "soft")) | 
|  | 1636 | return 0; | 
|  | 1637 | if (softirq_verbose(this->class)) | 
|  | 1638 | ret = 2; | 
|  | 1639 | break; | 
|  | 1640 | case LOCK_ENABLED_HARDIRQS: | 
|  | 1641 | if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ)) | 
|  | 1642 | return 0; | 
|  | 1643 | if (!valid_state(curr, this, new_bit, | 
|  | 1644 | LOCK_USED_IN_HARDIRQ_READ)) | 
|  | 1645 | return 0; | 
|  | 1646 | /* | 
|  | 1647 | * just marked it hardirq-unsafe, check that no hardirq-safe | 
|  | 1648 | * lock in the system ever took it in the past: | 
|  | 1649 | */ | 
|  | 1650 | if (!check_usage_backwards(curr, this, | 
|  | 1651 | LOCK_USED_IN_HARDIRQ, "hard")) | 
|  | 1652 | return 0; | 
|  | 1653 | #if STRICT_READ_CHECKS | 
|  | 1654 | /* | 
|  | 1655 | * just marked it hardirq-unsafe, check that no | 
|  | 1656 | * hardirq-safe-read lock in the system ever took | 
|  | 1657 | * it in the past: | 
|  | 1658 | */ | 
|  | 1659 | if (!check_usage_backwards(curr, this, | 
|  | 1660 | LOCK_USED_IN_HARDIRQ_READ, "hard-read")) | 
|  | 1661 | return 0; | 
|  | 1662 | #endif | 
|  | 1663 | if (hardirq_verbose(this->class)) | 
|  | 1664 | ret = 2; | 
|  | 1665 | break; | 
|  | 1666 | case LOCK_ENABLED_SOFTIRQS: | 
|  | 1667 | if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ)) | 
|  | 1668 | return 0; | 
|  | 1669 | if (!valid_state(curr, this, new_bit, | 
|  | 1670 | LOCK_USED_IN_SOFTIRQ_READ)) | 
|  | 1671 | return 0; | 
|  | 1672 | /* | 
|  | 1673 | * just marked it softirq-unsafe, check that no softirq-safe | 
|  | 1674 | * lock in the system ever took it in the past: | 
|  | 1675 | */ | 
|  | 1676 | if (!check_usage_backwards(curr, this, | 
|  | 1677 | LOCK_USED_IN_SOFTIRQ, "soft")) | 
|  | 1678 | return 0; | 
|  | 1679 | #if STRICT_READ_CHECKS | 
|  | 1680 | /* | 
|  | 1681 | * just marked it softirq-unsafe, check that no | 
|  | 1682 | * softirq-safe-read lock in the system ever took | 
|  | 1683 | * it in the past: | 
|  | 1684 | */ | 
|  | 1685 | if (!check_usage_backwards(curr, this, | 
|  | 1686 | LOCK_USED_IN_SOFTIRQ_READ, "soft-read")) | 
|  | 1687 | return 0; | 
|  | 1688 | #endif | 
|  | 1689 | if (softirq_verbose(this->class)) | 
|  | 1690 | ret = 2; | 
|  | 1691 | break; | 
|  | 1692 | case LOCK_ENABLED_HARDIRQS_READ: | 
|  | 1693 | if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ)) | 
|  | 1694 | return 0; | 
|  | 1695 | #if STRICT_READ_CHECKS | 
|  | 1696 | /* | 
|  | 1697 | * just marked it hardirq-read-unsafe, check that no | 
|  | 1698 | * hardirq-safe lock in the system ever took it in the past: | 
|  | 1699 | */ | 
|  | 1700 | if (!check_usage_backwards(curr, this, | 
|  | 1701 | LOCK_USED_IN_HARDIRQ, "hard")) | 
|  | 1702 | return 0; | 
|  | 1703 | #endif | 
|  | 1704 | if (hardirq_verbose(this->class)) | 
|  | 1705 | ret = 2; | 
|  | 1706 | break; | 
|  | 1707 | case LOCK_ENABLED_SOFTIRQS_READ: | 
|  | 1708 | if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ)) | 
|  | 1709 | return 0; | 
|  | 1710 | #if STRICT_READ_CHECKS | 
|  | 1711 | /* | 
|  | 1712 | * just marked it softirq-read-unsafe, check that no | 
|  | 1713 | * softirq-safe lock in the system ever took it in the past: | 
|  | 1714 | */ | 
|  | 1715 | if (!check_usage_backwards(curr, this, | 
|  | 1716 | LOCK_USED_IN_SOFTIRQ, "soft")) | 
|  | 1717 | return 0; | 
|  | 1718 | #endif | 
|  | 1719 | if (softirq_verbose(this->class)) | 
|  | 1720 | ret = 2; | 
|  | 1721 | break; | 
|  | 1722 | #endif | 
|  | 1723 | case LOCK_USED: | 
|  | 1724 | /* | 
|  | 1725 | * Add it to the global list of classes: | 
|  | 1726 | */ | 
|  | 1727 | list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes); | 
|  | 1728 | debug_atomic_dec(&nr_unused_locks); | 
|  | 1729 | break; | 
|  | 1730 | default: | 
|  | 1731 | debug_locks_off(); | 
|  | 1732 | WARN_ON(1); | 
|  | 1733 | return 0; | 
|  | 1734 | } | 
|  | 1735 |  | 
|  | 1736 | __raw_spin_unlock(&hash_lock); | 
|  | 1737 |  | 
|  | 1738 | /* | 
|  | 1739 | * We must printk outside of the hash_lock: | 
|  | 1740 | */ | 
|  | 1741 | if (ret == 2) { | 
|  | 1742 | printk("\nmarked lock as {%s}:\n", usage_str[new_bit]); | 
|  | 1743 | print_lock(this); | 
|  | 1744 | print_irqtrace_events(curr); | 
|  | 1745 | dump_stack(); | 
|  | 1746 | } | 
|  | 1747 |  | 
|  | 1748 | return ret; | 
|  | 1749 | } | 
|  | 1750 |  | 
|  | 1751 | #ifdef CONFIG_TRACE_IRQFLAGS | 
|  | 1752 | /* | 
|  | 1753 | * Mark all held locks with a usage bit: | 
|  | 1754 | */ | 
|  | 1755 | static int | 
|  | 1756 | mark_held_locks(struct task_struct *curr, int hardirq, unsigned long ip) | 
|  | 1757 | { | 
|  | 1758 | enum lock_usage_bit usage_bit; | 
|  | 1759 | struct held_lock *hlock; | 
|  | 1760 | int i; | 
|  | 1761 |  | 
|  | 1762 | for (i = 0; i < curr->lockdep_depth; i++) { | 
|  | 1763 | hlock = curr->held_locks + i; | 
|  | 1764 |  | 
|  | 1765 | if (hardirq) { | 
|  | 1766 | if (hlock->read) | 
|  | 1767 | usage_bit = LOCK_ENABLED_HARDIRQS_READ; | 
|  | 1768 | else | 
|  | 1769 | usage_bit = LOCK_ENABLED_HARDIRQS; | 
|  | 1770 | } else { | 
|  | 1771 | if (hlock->read) | 
|  | 1772 | usage_bit = LOCK_ENABLED_SOFTIRQS_READ; | 
|  | 1773 | else | 
|  | 1774 | usage_bit = LOCK_ENABLED_SOFTIRQS; | 
|  | 1775 | } | 
|  | 1776 | if (!mark_lock(curr, hlock, usage_bit, ip)) | 
|  | 1777 | return 0; | 
|  | 1778 | } | 
|  | 1779 |  | 
|  | 1780 | return 1; | 
|  | 1781 | } | 
|  | 1782 |  | 
|  | 1783 | /* | 
|  | 1784 | * Debugging helper: via this flag we know that we are in | 
|  | 1785 | * 'early bootup code', and will warn about any invalid irqs-on event: | 
|  | 1786 | */ | 
|  | 1787 | static int early_boot_irqs_enabled; | 
|  | 1788 |  | 
|  | 1789 | void early_boot_irqs_off(void) | 
|  | 1790 | { | 
|  | 1791 | early_boot_irqs_enabled = 0; | 
|  | 1792 | } | 
|  | 1793 |  | 
|  | 1794 | void early_boot_irqs_on(void) | 
|  | 1795 | { | 
|  | 1796 | early_boot_irqs_enabled = 1; | 
|  | 1797 | } | 
|  | 1798 |  | 
|  | 1799 | /* | 
|  | 1800 | * Hardirqs will be enabled: | 
|  | 1801 | */ | 
|  | 1802 | void trace_hardirqs_on(void) | 
|  | 1803 | { | 
|  | 1804 | struct task_struct *curr = current; | 
|  | 1805 | unsigned long ip; | 
|  | 1806 |  | 
|  | 1807 | if (unlikely(!debug_locks || current->lockdep_recursion)) | 
|  | 1808 | return; | 
|  | 1809 |  | 
|  | 1810 | if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled))) | 
|  | 1811 | return; | 
|  | 1812 |  | 
|  | 1813 | if (unlikely(curr->hardirqs_enabled)) { | 
|  | 1814 | debug_atomic_inc(&redundant_hardirqs_on); | 
|  | 1815 | return; | 
|  | 1816 | } | 
|  | 1817 | /* we'll do an OFF -> ON transition: */ | 
|  | 1818 | curr->hardirqs_enabled = 1; | 
|  | 1819 | ip = (unsigned long) __builtin_return_address(0); | 
|  | 1820 |  | 
|  | 1821 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 
|  | 1822 | return; | 
|  | 1823 | if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) | 
|  | 1824 | return; | 
|  | 1825 | /* | 
|  | 1826 | * We are going to turn hardirqs on, so set the | 
|  | 1827 | * usage bit for all held locks: | 
|  | 1828 | */ | 
|  | 1829 | if (!mark_held_locks(curr, 1, ip)) | 
|  | 1830 | return; | 
|  | 1831 | /* | 
|  | 1832 | * If we have softirqs enabled, then set the usage | 
|  | 1833 | * bit for all held locks. (disabled hardirqs prevented | 
|  | 1834 | * this bit from being set before) | 
|  | 1835 | */ | 
|  | 1836 | if (curr->softirqs_enabled) | 
|  | 1837 | if (!mark_held_locks(curr, 0, ip)) | 
|  | 1838 | return; | 
|  | 1839 |  | 
|  | 1840 | curr->hardirq_enable_ip = ip; | 
|  | 1841 | curr->hardirq_enable_event = ++curr->irq_events; | 
|  | 1842 | debug_atomic_inc(&hardirqs_on_events); | 
|  | 1843 | } | 
|  | 1844 |  | 
|  | 1845 | EXPORT_SYMBOL(trace_hardirqs_on); | 
|  | 1846 |  | 
|  | 1847 | /* | 
|  | 1848 | * Hardirqs were disabled: | 
|  | 1849 | */ | 
|  | 1850 | void trace_hardirqs_off(void) | 
|  | 1851 | { | 
|  | 1852 | struct task_struct *curr = current; | 
|  | 1853 |  | 
|  | 1854 | if (unlikely(!debug_locks || current->lockdep_recursion)) | 
|  | 1855 | return; | 
|  | 1856 |  | 
|  | 1857 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 
|  | 1858 | return; | 
|  | 1859 |  | 
|  | 1860 | if (curr->hardirqs_enabled) { | 
|  | 1861 | /* | 
|  | 1862 | * We have done an ON -> OFF transition: | 
|  | 1863 | */ | 
|  | 1864 | curr->hardirqs_enabled = 0; | 
|  | 1865 | curr->hardirq_disable_ip = _RET_IP_; | 
|  | 1866 | curr->hardirq_disable_event = ++curr->irq_events; | 
|  | 1867 | debug_atomic_inc(&hardirqs_off_events); | 
|  | 1868 | } else | 
|  | 1869 | debug_atomic_inc(&redundant_hardirqs_off); | 
|  | 1870 | } | 
|  | 1871 |  | 
|  | 1872 | EXPORT_SYMBOL(trace_hardirqs_off); | 
|  | 1873 |  | 
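|  |  | /* | 
|  |  | * Editor's aside - where the two hooks above get called from. With | 
|  |  | * CONFIG_TRACE_IRQFLAGS, include/linux/irqflags.h wraps the raw irq | 
|  |  | * operations roughly like this (sketch, modulo per-arch details): | 
|  |  | * | 
|  |  | *   #define local_irq_enable() \ | 
|  |  | *     do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) | 
|  |  | *   #define local_irq_disable() \ | 
|  |  | *     do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) | 
|  |  | * | 
|  |  | * Note the ordering: the on-transition is announced while irqs are | 
|  |  | * still off, and the off-transition only after irqs are already off, | 
|  |  | * so both hooks always run with irqs disabled. | 
|  |  | */ | 
|  |  |  | 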
|  | 1874 | /* | 
|  | 1875 | * Softirqs will be enabled: | 
|  | 1876 | */ | 
|  | 1877 | void trace_softirqs_on(unsigned long ip) | 
|  | 1878 | { | 
|  | 1879 | struct task_struct *curr = current; | 
|  | 1880 |  | 
|  | 1881 | if (unlikely(!debug_locks)) | 
|  | 1882 | return; | 
|  | 1883 |  | 
|  | 1884 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 
|  | 1885 | return; | 
|  | 1886 |  | 
|  | 1887 | if (curr->softirqs_enabled) { | 
|  | 1888 | debug_atomic_inc(&redundant_softirqs_on); | 
|  | 1889 | return; | 
|  | 1890 | } | 
|  | 1891 |  | 
|  | 1892 | /* | 
|  | 1893 | * We'll do an OFF -> ON transition: | 
|  | 1894 | */ | 
|  | 1895 | curr->softirqs_enabled = 1; | 
|  | 1896 | curr->softirq_enable_ip = ip; | 
|  | 1897 | curr->softirq_enable_event = ++curr->irq_events; | 
|  | 1898 | debug_atomic_inc(&softirqs_on_events); | 
|  | 1899 | /* | 
|  | 1900 | * We are going to turn softirqs on, so set the | 
|  | 1901 | * usage bit for all held locks, if hardirqs are | 
|  | 1902 | * enabled too: | 
|  | 1903 | */ | 
|  | 1904 | if (curr->hardirqs_enabled) | 
|  | 1905 | mark_held_locks(curr, 0, ip); | 
|  | 1906 | } | 
|  | 1907 |  | 
|  | 1908 | /* | 
|  | 1909 | * Softirqs were disabled: | 
|  | 1910 | */ | 
|  | 1911 | void trace_softirqs_off(unsigned long ip) | 
|  | 1912 | { | 
|  | 1913 | struct task_struct *curr = current; | 
|  | 1914 |  | 
|  | 1915 | if (unlikely(!debug_locks)) | 
|  | 1916 | return; | 
|  | 1917 |  | 
|  | 1918 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 
|  | 1919 | return; | 
|  | 1920 |  | 
|  | 1921 | if (curr->softirqs_enabled) { | 
|  | 1922 | /* | 
|  | 1923 | * We have done an ON -> OFF transition: | 
|  | 1924 | */ | 
|  | 1925 | curr->softirqs_enabled = 0; | 
|  | 1926 | curr->softirq_disable_ip = ip; | 
|  | 1927 | curr->softirq_disable_event = ++curr->irq_events; | 
|  | 1928 | debug_atomic_inc(&softirqs_off_events); | 
|  | 1929 | DEBUG_LOCKS_WARN_ON(!softirq_count()); | 
|  | 1930 | } else | 
|  | 1931 | debug_atomic_inc(&redundant_softirqs_off); | 
|  | 1932 | } | 
|  | 1933 |  | 
|  | 1934 | #endif | 
|  | 1935 |  | 
|  | 1936 | /* | 
|  | 1937 | * Initialize a lock instance's lock-class mapping info: | 
|  | 1938 | */ | 
|  | 1939 | void lockdep_init_map(struct lockdep_map *lock, const char *name, | 
|  | 1940 | struct lock_class_key *key) | 
|  | 1941 | { | 
|  | 1942 | if (unlikely(!debug_locks)) | 
|  | 1943 | return; | 
|  | 1944 |  | 
|  | 1945 | if (DEBUG_LOCKS_WARN_ON(!key)) | 
|  | 1946 | return; | 
|  | 1947 | if (DEBUG_LOCKS_WARN_ON(!name)) | 
|  | 1948 | return; | 
|  | 1949 | /* | 
|  | 1950 | * Sanity check: the lock-class key must be persistent: | 
|  | 1951 | */ | 
|  | 1952 | if (!static_obj(key)) { | 
|  | 1953 | printk("BUG: key %p not in .data!\n", key); | 
|  | 1954 | DEBUG_LOCKS_WARN_ON(1); | 
|  | 1955 | return; | 
|  | 1956 | } | 
|  | 1957 | lock->name = name; | 
|  | 1958 | lock->key = key; | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 1959 | lock->class_cache = NULL; | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1960 | } | 
|  | 1961 |  | 
|  | 1962 | EXPORT_SYMBOL_GPL(lockdep_init_map); | 
|  | 1963 |  | 
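|  |  | /* | 
|  |  | * Editor's aside - how callers typically satisfy the static_obj() | 
|  |  | * check above. The lock-init macros hide one static (hence persistent) | 
|  |  | * key per initialization site, roughly: | 
|  |  | * | 
|  |  | *   #define spin_lock_init(lock)                          \ | 
|  |  | *   do {                                                  \ | 
|  |  | *           static struct lock_class_key __key;           \ | 
|  |  | *                                                         \ | 
|  |  | *           __spin_lock_init((lock), #lock, &__key);      \ | 
|  |  | *   } while (0) | 
|  |  | * | 
|  |  | * so each init site becomes its own lock class, no matter how many | 
|  |  | * lock instances it initializes. | 
|  |  | */ | 
|  |  |  | 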
|  | 1964 | /* | 
|  | 1965 | * This gets called for every mutex_lock*()/spin_lock*() operation. | 
|  | 1966 | * We maintain the dependency maps and validate the locking attempt: | 
|  | 1967 | */ | 
|  | 1968 | static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | 
|  | 1969 | int trylock, int read, int check, int hardirqs_off, | 
|  | 1970 | unsigned long ip) | 
|  | 1971 | { | 
|  | 1972 | struct task_struct *curr = current; | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 1973 | struct lock_class *class = NULL; | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1974 | struct held_lock *hlock; | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1975 | unsigned int depth, id; | 
|  | 1976 | int chain_head = 0; | 
|  | 1977 | u64 chain_key; | 
|  | 1978 |  | 
|  | 1979 | if (unlikely(!debug_locks)) | 
|  | 1980 | return 0; | 
|  | 1981 |  | 
|  | 1982 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 
|  | 1983 | return 0; | 
|  | 1984 |  | 
|  | 1985 | if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { | 
|  | 1986 | debug_locks_off(); | 
|  | 1987 | printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n"); | 
|  | 1988 | printk("turning off the locking correctness validator.\n"); | 
|  | 1989 | return 0; | 
|  | 1990 | } | 
|  | 1991 |  | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 1992 | if (!subclass) | 
|  | 1993 | class = lock->class_cache; | 
|  | 1994 | /* | 
|  | 1995 | * Not cached yet, or a non-zero subclass? | 
|  | 1996 | */ | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1997 | if (unlikely(!class)) { | 
|  | 1998 | class = register_lock_class(lock, subclass); | 
|  | 1999 | if (!class) | 
|  | 2000 | return 0; | 
|  | 2001 | } | 
|  | 2002 | debug_atomic_inc((atomic_t *)&class->ops); | 
|  | 2003 | if (very_verbose(class)) { | 
|  | 2004 | printk("\nacquire class [%p] %s", class->key, class->name); | 
|  | 2005 | if (class->name_version > 1) | 
|  | 2006 | printk("#%d", class->name_version); | 
|  | 2007 | printk("\n"); | 
|  | 2008 | dump_stack(); | 
|  | 2009 | } | 
|  | 2010 |  | 
|  | 2011 | /* | 
|  | 2012 | * Add the lock to the list of currently held locks. | 
|  | 2013 | * (we don't increase the depth just yet, not until the | 
|  | 2014 | * dependency checks are done) | 
|  | 2015 | */ | 
|  | 2016 | depth = curr->lockdep_depth; | 
|  | 2017 | if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) | 
|  | 2018 | return 0; | 
|  | 2019 |  | 
|  | 2020 | hlock = curr->held_locks + depth; | 
|  | 2021 |  | 
|  | 2022 | hlock->class = class; | 
|  | 2023 | hlock->acquire_ip = ip; | 
|  | 2024 | hlock->instance = lock; | 
|  | 2025 | hlock->trylock = trylock; | 
|  | 2026 | hlock->read = read; | 
|  | 2027 | hlock->check = check; | 
|  | 2028 | hlock->hardirqs_off = hardirqs_off; | 
|  | 2029 |  | 
|  | 2030 | if (check != 2) | 
|  | 2031 | goto out_calc_hash; | 
|  | 2032 | #ifdef CONFIG_TRACE_IRQFLAGS | 
|  | 2033 | /* | 
|  | 2034 | * On a non-trylock acquire in a hardirq or softirq context, | 
|  | 2035 | * mark the lock as used in these contexts: | 
|  | 2036 | */ | 
|  | 2037 | if (!trylock) { | 
|  | 2038 | if (read) { | 
|  | 2039 | if (curr->hardirq_context) | 
|  | 2040 | if (!mark_lock(curr, hlock, | 
|  | 2041 | LOCK_USED_IN_HARDIRQ_READ, ip)) | 
|  | 2042 | return 0; | 
|  | 2043 | if (curr->softirq_context) | 
|  | 2044 | if (!mark_lock(curr, hlock, | 
|  | 2045 | LOCK_USED_IN_SOFTIRQ_READ, ip)) | 
|  | 2046 | return 0; | 
|  | 2047 | } else { | 
|  | 2048 | if (curr->hardirq_context) | 
|  | 2049 | if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ, ip)) | 
|  | 2050 | return 0; | 
|  | 2051 | if (curr->softirq_context) | 
|  | 2052 | if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ, ip)) | 
|  | 2053 | return 0; | 
|  | 2054 | } | 
|  | 2055 | } | 
|  | 2056 | if (!hardirqs_off) { | 
|  | 2057 | if (read) { | 
|  | 2058 | if (!mark_lock(curr, hlock, | 
|  | 2059 | LOCK_ENABLED_HARDIRQS_READ, ip)) | 
|  | 2060 | return 0; | 
|  | 2061 | if (curr->softirqs_enabled) | 
|  | 2062 | if (!mark_lock(curr, hlock, | 
|  | 2063 | LOCK_ENABLED_SOFTIRQS_READ, ip)) | 
|  | 2064 | return 0; | 
|  | 2065 | } else { | 
|  | 2066 | if (!mark_lock(curr, hlock, | 
|  | 2067 | LOCK_ENABLED_HARDIRQS, ip)) | 
|  | 2068 | return 0; | 
|  | 2069 | if (curr->softirqs_enabled) | 
|  | 2070 | if (!mark_lock(curr, hlock, | 
|  | 2071 | LOCK_ENABLED_SOFTIRQS, ip)) | 
|  | 2072 | return 0; | 
|  | 2073 | } | 
|  | 2074 | } | 
|  | 2075 | #endif | 
|  | 2076 | /* mark it as used: */ | 
|  | 2077 | if (!mark_lock(curr, hlock, LOCK_USED, ip)) | 
|  | 2078 | return 0; | 
|  | 2079 | out_calc_hash: | 
|  | 2080 | /* | 
|  | 2081 | * Calculate the chain hash: it's the combined hash of all the | 
|  | 2082 | * lock keys along the dependency chain. We save the hash value | 
|  | 2083 | * at every step so that we can get the current hash easily | 
|  | 2084 | * after unlock. The chain hash is then used to cache dependency | 
|  | 2085 | * results. | 
|  | 2086 | * | 
|  | 2087 | * The 'key ID' (the class index) is the most compact key value | 
|  | 2088 | * we can use to drive the hash - more compact than class->key. | 
|  | 2089 | */ | 
|  | 2090 | id = class - lock_classes; | 
|  | 2091 | if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) | 
|  | 2092 | return 0; | 
|  | 2093 |  | 
|  | 2094 | chain_key = curr->curr_chain_key; | 
|  | 2095 | if (!depth) { | 
|  | 2096 | if (DEBUG_LOCKS_WARN_ON(chain_key != 0)) | 
|  | 2097 | return 0; | 
|  | 2098 | chain_head = 1; | 
|  | 2099 | } | 
|  | 2100 |  | 
|  | 2101 | hlock->prev_chain_key = chain_key; | 
|  | 2102 |  | 
|  | 2103 | #ifdef CONFIG_TRACE_IRQFLAGS | 
|  | 2104 | /* | 
|  | 2105 | * Keep track of points where we cross into an interrupt context: | 
|  | 2106 | */ | 
|  | 2107 | hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) + | 
|  | 2108 | curr->softirq_context; | 
|  | 2109 | if (depth) { | 
|  | 2110 | struct held_lock *prev_hlock; | 
|  | 2111 |  | 
|  | 2112 | prev_hlock = curr->held_locks + depth-1; | 
|  | 2113 | /* | 
|  | 2114 | * If we cross into another context, reset the | 
|  | 2115 | * hash key (this also prevents the checking and the | 
|  | 2116 | * adding of the dependency to 'prev'): | 
|  | 2117 | */ | 
|  | 2118 | if (prev_hlock->irq_context != hlock->irq_context) { | 
|  | 2119 | chain_key = 0; | 
|  | 2120 | chain_head = 1; | 
|  | 2121 | } | 
|  | 2122 | } | 
|  | 2123 | #endif | 
|  | 2124 | chain_key = iterate_chain_key(chain_key, id); | 
|  | 2125 | curr->curr_chain_key = chain_key; | 
|  | 2126 |  | 
|  | 2127 | /* | 
|  | 2128 | * Trylock needs to maintain the stack of held locks, but it | 
|  | 2129 | * does not add new dependencies, because trylock can be done | 
|  | 2130 | * in any order. | 
|  | 2131 | * | 
|  | 2132 | * We look up the chain_key and do the O(N^2) check and update of | 
|  | 2133 | * the dependencies only if this is a new dependency chain. | 
|  | 2134 | * (If lookup_chain_cache() returns 1 it acquires | 
|  | 2135 | * hash_lock for us) | 
|  | 2136 | */ | 
|  | 2137 | if (!trylock && (check == 2) && lookup_chain_cache(chain_key)) { | 
|  | 2138 | /* | 
|  | 2139 | * Check whether last held lock: | 
|  | 2140 | * | 
|  | 2141 | * - is irq-safe, if this lock is irq-unsafe | 
|  | 2142 | * - is softirq-safe, if this lock is hardirq-unsafe | 
|  | 2143 | * | 
|  | 2144 | * And check whether the new lock's dependency graph | 
|  | 2145 | * could lead back to the previous lock. | 
|  | 2146 | * | 
|  | 2147 | * Any of these scenarios could lead to a deadlock. | 
|  | 2149 | */ | 
|  | 2150 | int ret = check_deadlock(curr, hlock, lock, read); | 
|  | 2151 |  | 
|  | 2152 | if (!ret) | 
|  | 2153 | return 0; | 
|  | 2154 | /* | 
|  | 2155 | * Mark recursive read, as we jump over it when | 
|  | 2156 | * building dependencies (just like we jump over | 
|  | 2157 | * trylock entries): | 
|  | 2158 | */ | 
|  | 2159 | if (ret == 2) | 
|  | 2160 | hlock->read = 2; | 
|  | 2161 | /* | 
|  | 2162 | * Add dependency only if this lock is not the head | 
|  | 2163 | * of the chain, and if it's not a secondary read-lock: | 
|  | 2164 | */ | 
|  | 2165 | if (!chain_head && ret != 2) | 
|  | 2166 | if (!check_prevs_add(curr, hlock)) | 
|  | 2167 | return 0; | 
|  | 2168 | __raw_spin_unlock(&hash_lock); | 
|  | 2169 | } | 
|  | 2170 | curr->lockdep_depth++; | 
|  | 2171 | check_chain_key(curr); | 
|  | 2172 | if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { | 
|  | 2173 | debug_locks_off(); | 
|  | 2174 | printk("BUG: MAX_LOCK_DEPTH too low!\n"); | 
|  | 2175 | printk("turning off the locking correctness validator.\n"); | 
|  | 2176 | return 0; | 
|  | 2177 | } | 
|  | 2178 | if (unlikely(curr->lockdep_depth > max_lockdep_depth)) | 
|  | 2179 | max_lockdep_depth = curr->lockdep_depth; | 
|  | 2180 |  | 
|  | 2181 | return 1; | 
|  | 2182 | } | 
|  | 2183 |  | 
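|  |  | /* | 
|  |  | * Editor's aside - why the subclass parameter exists, as a sketch | 
|  |  | * (the function and locks are hypothetical): | 
|  |  | */ | 
|  |  | #if 0	/* illustrative sketch only */ | 
|  |  | static void my_double_lock(spinlock_t *a, spinlock_t *b) | 
|  |  | { | 
|  |  | spin_lock(a);					/* subclass 0 */ | 
|  |  | spin_lock_nested(b, SINGLE_DEPTH_NESTING);	/* subclass 1 */ | 
|  |  | } | 
|  |  | #endif | 
|  |  | /* | 
|  |  | * Without the _nested() annotation, taking two locks of the same class | 
|  |  | * trips check_deadlock()'s recursion test; with it, 'b' is registered | 
|  |  | * as a separate class via register_lock_class(lock, 1). | 
|  |  | */ | 
|  |  |  | 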
|  | 2184 | static int | 
|  | 2185 | print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock, | 
|  | 2186 | unsigned long ip) | 
|  | 2187 | { | 
|  | 2188 | if (!debug_locks_off()) | 
|  | 2189 | return 0; | 
|  | 2190 | if (debug_locks_silent) | 
|  | 2191 | return 0; | 
|  | 2192 |  | 
|  | 2193 | printk("\n=====================================\n"); | 
|  | 2194 | printk(  "[ BUG: bad unlock balance detected! ]\n"); | 
|  | 2195 | printk(  "-------------------------------------\n"); | 
|  | 2196 | printk("%s/%d is trying to release lock (", | 
|  | 2197 | curr->comm, curr->pid); | 
|  | 2198 | print_lockdep_cache(lock); | 
|  | 2199 | printk(") at:\n"); | 
|  | 2200 | print_ip_sym(ip); | 
|  | 2201 | printk("but there are no more locks to release!\n"); | 
|  | 2202 | printk("\nother info that might help us debug this:\n"); | 
|  | 2203 | lockdep_print_held_locks(curr); | 
|  | 2204 |  | 
|  | 2205 | printk("\nstack backtrace:\n"); | 
|  | 2206 | dump_stack(); | 
|  | 2207 |  | 
|  | 2208 | return 0; | 
|  | 2209 | } | 
|  | 2210 |  | 
|  | 2211 | /* | 
|  | 2212 | * Common debugging checks for both nested and non-nested unlock: | 
|  | 2213 | */ | 
|  | 2214 | static int check_unlock(struct task_struct *curr, struct lockdep_map *lock, | 
|  | 2215 | unsigned long ip) | 
|  | 2216 | { | 
|  | 2217 | if (unlikely(!debug_locks)) | 
|  | 2218 | return 0; | 
|  | 2219 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 
|  | 2220 | return 0; | 
|  | 2221 |  | 
|  | 2222 | if (curr->lockdep_depth <= 0) | 
|  | 2223 | return print_unlock_inbalance_bug(curr, lock, ip); | 
|  | 2224 |  | 
|  | 2225 | return 1; | 
|  | 2226 | } | 
|  | 2227 |  | 
|  | 2228 | /* | 
|  | 2229 | * Remove a lock from the list of currently held locks in a | 
|  | 2230 | * potentially non-nested (out of order) manner. This is a | 
|  | 2231 | * relatively rare operation, as all the unlock APIs default | 
|  | 2232 | * to nested mode (which uses lock_release()): | 
|  | 2233 | */ | 
|  | 2234 | static int | 
|  | 2235 | lock_release_non_nested(struct task_struct *curr, | 
|  | 2236 | struct lockdep_map *lock, unsigned long ip) | 
|  | 2237 | { | 
|  | 2238 | struct held_lock *hlock, *prev_hlock; | 
|  | 2239 | unsigned int depth; | 
|  | 2240 | int i; | 
|  | 2241 |  | 
|  | 2242 | /* | 
|  | 2243 | * Check whether the lock exists in the current stack | 
|  | 2244 | * of held locks: | 
|  | 2245 | */ | 
|  | 2246 | depth = curr->lockdep_depth; | 
|  | 2247 | if (DEBUG_LOCKS_WARN_ON(!depth)) | 
|  | 2248 | return 0; | 
|  | 2249 |  | 
|  | 2250 | prev_hlock = NULL; | 
|  | 2251 | for (i = depth-1; i >= 0; i--) { | 
|  | 2252 | hlock = curr->held_locks + i; | 
|  | 2253 | /* | 
|  | 2254 | * We must not cross into another context: | 
|  | 2255 | */ | 
|  | 2256 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) | 
|  | 2257 | break; | 
|  | 2258 | if (hlock->instance == lock) | 
|  | 2259 | goto found_it; | 
|  | 2260 | prev_hlock = hlock; | 
|  | 2261 | } | 
|  | 2262 | return print_unlock_inbalance_bug(curr, lock, ip); | 
|  | 2263 |  | 
|  | 2264 | found_it: | 
|  | 2265 | /* | 
|  | 2266 | * We have the right lock to unlock, 'hlock' points to it. | 
|  | 2267 | * Now we remove it from the stack, and add back the other | 
|  | 2268 | * entries (if any), recalculating the hash along the way: | 
|  | 2269 | */ | 
|  | 2270 | curr->lockdep_depth = i; | 
|  | 2271 | curr->curr_chain_key = hlock->prev_chain_key; | 
|  | 2272 |  | 
|  | 2273 | for (i++; i < depth; i++) { | 
|  | 2274 | hlock = curr->held_locks + i; | 
|  | 2275 | if (!__lock_acquire(hlock->instance, | 
|  | 2276 | hlock->class->subclass, hlock->trylock, | 
|  | 2277 | hlock->read, hlock->check, hlock->hardirqs_off, | 
|  | 2278 | hlock->acquire_ip)) | 
|  | 2279 | return 0; | 
|  | 2280 | } | 
|  | 2281 |  | 
|  | 2282 | if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) | 
|  | 2283 | return 0; | 
|  | 2284 | return 1; | 
|  | 2285 | } | 
|  | 2286 |  | 
|  | 2287 | /* | 
|  | 2288 | * Remove a lock from the list of currently held locks - this gets | 
|  | 2289 | * called on mutex_unlock()/spin_unlock*() (or on a failed | 
|  | 2290 | * mutex_lock_interruptible()). This is done for unlocks that nest | 
|  | 2291 | * perfectly. (i.e. the current top of the lock-stack is unlocked) | 
|  | 2292 | */ | 
|  | 2293 | static int lock_release_nested(struct task_struct *curr, | 
|  | 2294 | struct lockdep_map *lock, unsigned long ip) | 
|  | 2295 | { | 
|  | 2296 | struct held_lock *hlock; | 
|  | 2297 | unsigned int depth; | 
|  | 2298 |  | 
|  | 2299 | /* | 
|  | 2300 | * Pop off the top of the lock stack: | 
|  | 2301 | */ | 
|  | 2302 | depth = curr->lockdep_depth - 1; | 
|  | 2303 | hlock = curr->held_locks + depth; | 
|  | 2304 |  | 
|  | 2305 | /* | 
|  | 2306 | * Is the unlock non-nested: | 
|  | 2307 | */ | 
|  | 2308 | if (hlock->instance != lock) | 
|  | 2309 | return lock_release_non_nested(curr, lock, ip); | 
|  | 2310 | curr->lockdep_depth--; | 
|  | 2311 |  | 
|  | 2312 | if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0))) | 
|  | 2313 | return 0; | 
|  | 2314 |  | 
|  | 2315 | curr->curr_chain_key = hlock->prev_chain_key; | 
|  | 2316 |  | 
|  | 2317 | #ifdef CONFIG_DEBUG_LOCKDEP | 
|  | 2318 | hlock->prev_chain_key = 0; | 
|  | 2319 | hlock->class = NULL; | 
|  | 2320 | hlock->acquire_ip = 0; | 
|  | 2321 | hlock->irq_context = 0; | 
|  | 2322 | #endif | 
|  | 2323 | return 1; | 
|  | 2324 | } | 
|  | 2325 |  | 
|  | 2326 | /* | 
|  | 2327 | * Remove a lock from the list of currently held locks - this gets | 
|  | 2328 | * called on mutex_unlock()/spin_unlock*() (or on a failed | 
|  | 2329 | * mutex_lock_interruptible()), for both perfectly nested and | 
|  | 2330 | * non-nested (out of order) unlocks: | 
|  | 2331 | */ | 
|  | 2332 | static void | 
|  | 2333 | __lock_release(struct lockdep_map *lock, int nested, unsigned long ip) | 
|  | 2334 | { | 
|  | 2335 | struct task_struct *curr = current; | 
|  | 2336 |  | 
|  | 2337 | if (!check_unlock(curr, lock, ip)) | 
|  | 2338 | return; | 
|  | 2339 |  | 
|  | 2340 | if (nested) { | 
|  | 2341 | if (!lock_release_nested(curr, lock, ip)) | 
|  | 2342 | return; | 
|  | 2343 | } else { | 
|  | 2344 | if (!lock_release_non_nested(curr, lock, ip)) | 
|  | 2345 | return; | 
|  | 2346 | } | 
|  | 2347 |  | 
|  | 2348 | check_chain_key(curr); | 
|  | 2349 | } | 
|  | 2350 |  | 
|  | 2351 | /* | 
|  | 2352 | * Check whether we follow the irq-flags state precisely: | 
|  | 2353 | */ | 
|  | 2354 | static void check_flags(unsigned long flags) | 
|  | 2355 | { | 
|  | 2356 | #if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS) | 
|  | 2357 | if (!debug_locks) | 
|  | 2358 | return; | 
|  | 2359 |  | 
|  | 2360 | if (irqs_disabled_flags(flags)) | 
|  | 2361 | DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled); | 
|  | 2362 | else | 
|  | 2363 | DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled); | 
|  | 2364 |  | 
|  | 2365 | /* | 
|  | 2366 | * We don't accurately track softirq state in e.g. | 
|  | 2367 | * hardirq contexts (such as on 4KSTACKS), so only | 
|  | 2368 | * check if not in hardirq contexts: | 
|  | 2369 | */ | 
|  | 2370 | if (!hardirq_count()) { | 
|  | 2371 | if (softirq_count()) | 
|  | 2372 | DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); | 
|  | 2373 | else | 
|  | 2374 | DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); | 
|  | 2375 | } | 
|  | 2376 |  | 
|  | 2377 | if (!debug_locks) | 
|  | 2378 | print_irqtrace_events(current); | 
|  | 2379 | #endif | 
|  | 2380 | } | 
|  | 2381 |  | 
|  | 2382 | /* | 
|  | 2383 | * We are not always called with irqs disabled - do that here, | 
|  | 2384 | * and also avoid lockdep recursion: | 
|  | 2385 | */ | 
|  | 2386 | void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | 
|  | 2387 | int trylock, int read, int check, unsigned long ip) | 
|  | 2388 | { | 
|  | 2389 | unsigned long flags; | 
|  | 2390 |  | 
|  | 2391 | if (unlikely(current->lockdep_recursion)) | 
|  | 2392 | return; | 
|  | 2393 |  | 
|  | 2394 | raw_local_irq_save(flags); | 
|  | 2395 | check_flags(flags); | 
|  | 2396 |  | 
|  | 2397 | current->lockdep_recursion = 1; | 
|  | 2398 | __lock_acquire(lock, subclass, trylock, read, check, | 
|  | 2399 | irqs_disabled_flags(flags), ip); | 
|  | 2400 | current->lockdep_recursion = 0; | 
|  | 2401 | raw_local_irq_restore(flags); | 
|  | 2402 | } | 
|  | 2403 |  | 
|  | 2404 | EXPORT_SYMBOL_GPL(lock_acquire); | 
|  | 2405 |  | 
|  | 2406 | void lock_release(struct lockdep_map *lock, int nested, unsigned long ip) | 
|  | 2407 | { | 
|  | 2408 | unsigned long flags; | 
|  | 2409 |  | 
|  | 2410 | if (unlikely(current->lockdep_recursion)) | 
|  | 2411 | return; | 
|  | 2412 |  | 
|  | 2413 | raw_local_irq_save(flags); | 
|  | 2414 | check_flags(flags); | 
|  | 2415 | current->lockdep_recursion = 1; | 
|  | 2416 | __lock_release(lock, nested, ip); | 
|  | 2417 | current->lockdep_recursion = 0; | 
|  | 2418 | raw_local_irq_restore(flags); | 
|  | 2419 | } | 
|  | 2420 |  | 
|  | 2421 | EXPORT_SYMBOL_GPL(lock_release); | 
|  | 2422 |  | 
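|  |  | /* | 
|  |  | * Editor's aside - how the lock implementations reach the two entry | 
|  |  | * points above. Under CONFIG_PROVE_LOCKING the wrappers in | 
|  |  | * include/linux/lockdep.h expand roughly as: | 
|  |  | * | 
|  |  | *   spin_acquire(l, s, t, i)  ->  lock_acquire(l, s, t, 0, 2, i) | 
|  |  | *   spin_release(l, n, i)     ->  lock_release(l, n, i) | 
|  |  | * | 
|  |  | * i.e. a spin_lock() is a non-trylock, non-read, fully-checked | 
|  |  | * (check == 2) acquire, and a spin_unlock() is a nested release. | 
|  |  | */ | 
|  |  |  | 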
|  | 2423 | /* | 
|  | 2424 | * Used by the testsuite - sanitize the validator state | 
|  | 2425 | * after a simulated failure: | 
|  | 2426 | */ | 
|  | 2427 |  | 
|  | 2428 | void lockdep_reset(void) | 
|  | 2429 | { | 
|  | 2430 | unsigned long flags; | 
|  | 2431 |  | 
|  | 2432 | raw_local_irq_save(flags); | 
|  | 2433 | current->curr_chain_key = 0; | 
|  | 2434 | current->lockdep_depth = 0; | 
|  | 2435 | current->lockdep_recursion = 0; | 
|  | 2436 | memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock)); | 
|  | 2437 | nr_hardirq_chains = 0; | 
|  | 2438 | nr_softirq_chains = 0; | 
|  | 2439 | nr_process_chains = 0; | 
|  | 2440 | debug_locks = 1; | 
|  | 2441 | raw_local_irq_restore(flags); | 
|  | 2442 | } | 
|  | 2443 |  | 
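|  |  | /* | 
|  |  | * Editor's aside: the expected user is the locking self-test suite | 
|  |  | * (lib/locking-selftest.c), which deliberately provokes validator | 
|  |  | * failures and then calls lockdep_reset()/lockdep_reset_lock() so the | 
|  |  | * next test case starts from a clean state. | 
|  |  | */ | 
|  |  |  | 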
|  | 2444 | static void zap_class(struct lock_class *class) | 
|  | 2445 | { | 
|  | 2446 | int i; | 
|  | 2447 |  | 
|  | 2448 | /* | 
|  | 2449 | * Remove all dependencies this lock is | 
|  | 2450 | * involved in: | 
|  | 2451 | */ | 
|  | 2452 | for (i = 0; i < nr_list_entries; i++) { | 
|  | 2453 | if (list_entries[i].class == class) | 
|  | 2454 | list_del_rcu(&list_entries[i].entry); | 
|  | 2455 | } | 
|  | 2456 | /* | 
|  | 2457 | * Unhash the class and remove it from the all_lock_classes list: | 
|  | 2458 | */ | 
|  | 2459 | list_del_rcu(&class->hash_entry); | 
|  | 2460 | list_del_rcu(&class->lock_entry); | 
|  | 2462 | } | 
|  | 2463 |  | 
|  | 2464 | static inline int within(void *addr, void *start, unsigned long size) | 
|  | 2465 | { | 
|  | 2466 | return addr >= start && addr < start + size; | 
|  | 2467 | } | 
|  | 2468 |  | 
|  | 2469 | void lockdep_free_key_range(void *start, unsigned long size) | 
|  | 2470 | { | 
|  | 2471 | struct lock_class *class, *next; | 
|  | 2472 | struct list_head *head; | 
|  | 2473 | unsigned long flags; | 
|  | 2474 | int i; | 
|  | 2475 |  | 
|  | 2476 | raw_local_irq_save(flags); | 
|  | 2477 | __raw_spin_lock(&hash_lock); | 
|  | 2478 |  | 
|  | 2479 | /* | 
|  | 2480 | * Unhash all classes that were created by this module: | 
|  | 2481 | */ | 
|  | 2482 | for (i = 0; i < CLASSHASH_SIZE; i++) { | 
|  | 2483 | head = classhash_table + i; | 
|  | 2484 | if (list_empty(head)) | 
|  | 2485 | continue; | 
|  | 2486 | list_for_each_entry_safe(class, next, head, hash_entry) | 
|  | 2487 | if (within(class->key, start, size)) | 
|  | 2488 | zap_class(class); | 
|  | 2489 | } | 
|  | 2490 |  | 
|  | 2491 | __raw_spin_unlock(&hash_lock); | 
|  | 2492 | raw_local_irq_restore(flags); | 
|  | 2493 | } | 
|  | 2494 |  | 
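|  |  | /* | 
|  |  | * Editor's aside: this is meant for the module unloader, which would | 
|  |  | * call roughly lockdep_free_key_range(mod->module_core, mod->core_size) | 
|  |  | * so that static keys inside the vanishing image do not leave stale | 
|  |  | * classes behind in the hash. | 
|  |  | */ | 
|  |  |  | 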
|  | 2495 | void lockdep_reset_lock(struct lockdep_map *lock) | 
|  | 2496 | { | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 2497 | struct lock_class *class, *next; | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2498 | struct list_head *head; | 
|  | 2499 | unsigned long flags; | 
|  | 2500 | int i, j; | 
|  | 2501 |  | 
|  | 2502 | raw_local_irq_save(flags); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2503 |  | 
|  | 2504 | /* | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 2505 | * Remove all classes this lock might have: | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2506 | */ | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 2507 | for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) { | 
|  | 2508 | /* | 
|  | 2509 | * If the class exists we look it up and zap it: | 
|  | 2510 | */ | 
|  | 2511 | class = look_up_lock_class(lock, j); | 
|  | 2512 | if (class) | 
|  | 2513 | zap_class(class); | 
|  | 2514 | } | 
|  | 2515 | /* | 
|  | 2516 | * Debug check: in the end all mapped classes should | 
|  | 2517 | * be gone. | 
|  | 2518 | */ | 
|  | 2519 | __raw_spin_lock(&hash_lock); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2520 | for (i = 0; i < CLASSHASH_SIZE; i++) { | 
|  | 2521 | head = classhash_table + i; | 
|  | 2522 | if (list_empty(head)) | 
|  | 2523 | continue; | 
|  | 2524 | list_for_each_entry_safe(class, next, head, hash_entry) { | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 2525 | if (unlikely(class == lock->class_cache)) { | 
|  | 2526 | __raw_spin_unlock(&hash_lock); | 
|  | 2527 | DEBUG_LOCKS_WARN_ON(1); | 
|  | 2528 | goto out_restore; | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2529 | } | 
|  | 2530 | } | 
|  | 2531 | } | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2532 | __raw_spin_unlock(&hash_lock); | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 2533 |  | 
|  | 2534 | out_restore: | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2535 | raw_local_irq_restore(flags); | 
|  | 2536 | } | 
|  | 2537 |  | 
|  | 2538 | void __init lockdep_init(void) | 
|  | 2539 | { | 
|  | 2540 | int i; | 
|  | 2541 |  | 
|  | 2542 | /* | 
|  | 2543 | * Some architectures have their own start_kernel() | 
|  | 2544 | * code which calls lockdep_init(), while we also | 
|  | 2545 | * call lockdep_init() from start_kernel() itself, | 
|  | 2546 | * and we want to initialize the hashes only once: | 
|  | 2547 | */ | 
|  | 2548 | if (lockdep_initialized) | 
|  | 2549 | return; | 
|  | 2550 |  | 
|  | 2551 | for (i = 0; i < CLASSHASH_SIZE; i++) | 
|  | 2552 | INIT_LIST_HEAD(classhash_table + i); | 
|  | 2553 |  | 
|  | 2554 | for (i = 0; i < CHAINHASH_SIZE; i++) | 
|  | 2555 | INIT_LIST_HEAD(chainhash_table + i); | 
|  | 2556 |  | 
|  | 2557 | lockdep_initialized = 1; | 
|  | 2558 | } | 
|  | 2559 |  | 
|  | 2560 | void __init lockdep_info(void) | 
|  | 2561 | { | 
|  | 2562 | printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); | 
|  | 2563 |  | 
|  | 2564 | printk("... MAX_LOCKDEP_SUBCLASSES:    %lu\n", MAX_LOCKDEP_SUBCLASSES); | 
|  | 2565 | printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH); | 
|  | 2566 | printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS); | 
|  | 2567 | printk("... CLASSHASH_SIZE:           %lu\n", CLASSHASH_SIZE); | 
|  | 2568 | printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES); | 
|  | 2569 | printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS); | 
|  | 2570 | printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE); | 
|  | 2571 |  | 
|  | 2572 | printk(" memory used by lock dependency info: %lu kB\n", | 
|  | 2573 | (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS + | 
|  | 2574 | sizeof(struct list_head) * CLASSHASH_SIZE + | 
|  | 2575 | sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES + | 
|  | 2576 | sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS + | 
|  | 2577 | sizeof(struct list_head) * CHAINHASH_SIZE) / 1024); | 
|  | 2578 |  | 
|  | 2579 | printk(" per task-struct memory footprint: %lu bytes\n", | 
|  | 2580 | sizeof(struct held_lock) * MAX_LOCK_DEPTH); | 
|  | 2581 |  | 
|  | 2582 | #ifdef CONFIG_DEBUG_LOCKDEP | 
|  | 2583 | if (lockdep_init_error) | 
|  | 2584 | printk("WARNING: lockdep init error! Arch code didnt call lockdep_init() early enough?\n"); | 
|  | 2585 | #endif | 
|  | 2586 | } | 
|  | 2587 |  | 
|  | 2588 | static inline int in_range(const void *start, const void *addr, const void *end) | 
|  | 2589 | { | 
|  | 2590 | return addr >= start && addr <= end; | 
|  | 2591 | } | 
|  | 2592 |  | 
|  | 2593 | static void | 
|  | 2594 | print_freed_lock_bug(struct task_struct *curr, const void *mem_from, | 
| Arjan van de Ven | 55794a4 | 2006-07-10 04:44:03 -0700 | [diff] [blame] | 2595 | const void *mem_to, struct held_lock *hlock) | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2596 | { | 
|  | 2597 | if (!debug_locks_off()) | 
|  | 2598 | return; | 
|  | 2599 | if (debug_locks_silent) | 
|  | 2600 | return; | 
|  | 2601 |  | 
|  | 2602 | printk("\n=========================\n"); | 
|  | 2603 | printk(  "[ BUG: held lock freed! ]\n"); | 
|  | 2604 | printk(  "-------------------------\n"); | 
|  | 2605 | printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n", | 
|  | 2606 | curr->comm, curr->pid, mem_from, mem_to-1); | 
| Arjan van de Ven | 55794a4 | 2006-07-10 04:44:03 -0700 | [diff] [blame] | 2607 | print_lock(hlock); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2608 | lockdep_print_held_locks(curr); | 
|  | 2609 |  | 
|  | 2610 | printk("\nstack backtrace:\n"); | 
|  | 2611 | dump_stack(); | 
|  | 2612 | } | 
|  | 2613 |  | 
|  | 2614 | /* | 
|  | 2615 | * Called when kernel memory is freed (or unmapped), or if a lock | 
|  | 2616 | * is destroyed or reinitialized - this code checks whether there is | 
|  | 2617 | * any held lock in the memory range <mem_from> .. <mem_from + mem_len>: | 
|  | 2618 | */ | 
|  | 2619 | void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) | 
|  | 2620 | { | 
|  | 2621 | const void *mem_to = mem_from + mem_len, *lock_from, *lock_to; | 
|  | 2622 | struct task_struct *curr = current; | 
|  | 2623 | struct held_lock *hlock; | 
|  | 2624 | unsigned long flags; | 
|  | 2625 | int i; | 
|  | 2626 |  | 
|  | 2627 | if (unlikely(!debug_locks)) | 
|  | 2628 | return; | 
|  | 2629 |  | 
|  | 2630 | local_irq_save(flags); | 
|  | 2631 | for (i = 0; i < curr->lockdep_depth; i++) { | 
|  | 2632 | hlock = curr->held_locks + i; | 
|  | 2633 |  | 
|  | 2634 | lock_from = (void *)hlock->instance; | 
|  | 2635 | lock_to = (void *)(hlock->instance + 1); | 
|  | 2636 |  | 
|  | 2637 | if (!in_range(mem_from, lock_from, mem_to) && | 
|  | 2638 | !in_range(mem_from, lock_to, mem_to)) | 
|  | 2639 | continue; | 
|  | 2640 |  | 
| Arjan van de Ven | 55794a4 | 2006-07-10 04:44:03 -0700 | [diff] [blame] | 2641 | print_freed_lock_bug(curr, mem_from, mem_to, hlock); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2642 | break; | 
|  | 2643 | } | 
|  | 2644 | local_irq_restore(flags); | 
|  | 2645 | } | 
|  | 2646 |  | 
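|  |  | /* | 
|  |  | * Editor's aside: the memory allocators are the expected callers - | 
|  |  | * e.g. the slab free path checking the object being freed, roughly: | 
|  |  | * | 
|  |  | *   debug_check_no_locks_freed(objp, obj_size(cachep)); | 
|  |  | * | 
|  |  | * so freeing (or unmapping) memory that still contains a held lock is | 
|  |  | * caught at the free site rather than at some later use of the lock. | 
|  |  | */ | 
|  |  |  | 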
|  | 2647 | static void print_held_locks_bug(struct task_struct *curr) | 
|  | 2648 | { | 
|  | 2649 | if (!debug_locks_off()) | 
|  | 2650 | return; | 
|  | 2651 | if (debug_locks_silent) | 
|  | 2652 | return; | 
|  | 2653 |  | 
|  | 2654 | printk("\n=====================================\n"); | 
|  | 2655 | printk(  "[ BUG: lock held at task exit time! ]\n"); | 
|  | 2656 | printk(  "-------------------------------------\n"); | 
|  | 2657 | printk("%s/%d is exiting with locks still held!\n", | 
|  | 2658 | curr->comm, curr->pid); | 
|  | 2659 | lockdep_print_held_locks(curr); | 
|  | 2660 |  | 
|  | 2661 | printk("\nstack backtrace:\n"); | 
|  | 2662 | dump_stack(); | 
|  | 2663 | } | 
|  | 2664 |  | 
|  | 2665 | void debug_check_no_locks_held(struct task_struct *task) | 
|  | 2666 | { | 
|  | 2667 | if (unlikely(task->lockdep_depth > 0)) | 
|  | 2668 | print_held_locks_bug(task); | 
|  | 2669 | } | 
|  | 2670 |  | 
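|  |  | /* | 
|  |  | * Editor's aside: the task-exit path (do_exit() in kernel/exit.c) is | 
|  |  | * the expected caller, so a task exiting with locks still held gets | 
|  |  | * reported before its held_locks stack disappears with it. | 
|  |  | */ | 
|  |  |  | 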
|  | 2671 | void debug_show_all_locks(void) | 
|  | 2672 | { | 
|  | 2673 | struct task_struct *g, *p; | 
|  | 2674 | int count = 10; | 
|  | 2675 | int unlock = 1; | 
|  | 2676 |  | 
|  | 2677 | printk("\nShowing all locks held in the system:\n"); | 
|  | 2678 |  | 
|  | 2679 | /* | 
|  | 2680 | * Here we try hard to take the tasklist_lock; if we cannot get it | 
|  | 2681 | * within 2 seconds we proceed without it (but keep trying to | 
|  | 2682 | * reacquire it while iterating). This enables a debug printout | 
|  | 2683 | * even if a tasklist_lock-holding task deadlocks or crashes. | 
|  | 2684 | */ | 
|  | 2685 | retry: | 
|  | 2686 | if (!read_trylock(&tasklist_lock)) { | 
|  | 2687 | if (count == 10) | 
|  | 2688 | printk("hm, tasklist_lock locked, retrying... "); | 
|  | 2689 | if (count) { | 
|  | 2690 | count--; | 
|  | 2691 | printk(" #%d", 10-count); | 
|  | 2692 | mdelay(200); | 
|  | 2693 | goto retry; | 
|  | 2694 | } | 
|  | 2695 | printk(" ignoring it.\n"); | 
|  | 2696 | unlock = 0; | 
|  | 2697 | } | 
|  | 2698 | if (unlock && count != 10) | 
|  | 2699 | printk(" locked it.\n"); | 
|  | 2700 |  | 
|  | 2701 | do_each_thread(g, p) { | 
|  | 2702 | if (p->lockdep_depth) | 
|  | 2703 | lockdep_print_held_locks(p); | 
|  | 2704 | if (!unlock) | 
|  | 2705 | if (read_trylock(&tasklist_lock)) | 
|  | 2706 | unlock = 1; | 
|  | 2707 | } while_each_thread(g, p); | 
|  | 2708 |  | 
|  | 2709 | printk("\n"); | 
|  | 2710 | printk("=============================================\n\n"); | 
|  | 2711 |  | 
|  | 2712 | if (unlock) | 
|  | 2713 | read_unlock(&tasklist_lock); | 
|  | 2714 | } | 
|  | 2715 |  | 
|  | 2716 | EXPORT_SYMBOL_GPL(debug_show_all_locks); | 
|  | 2717 |  | 
|  | 2718 | void debug_show_held_locks(struct task_struct *task) | 
|  | 2719 | { | 
|  | 2720 | lockdep_print_held_locks(task); | 
|  | 2721 | } | 
|  | 2722 |  | 
|  | 2723 | EXPORT_SYMBOL_GPL(debug_show_held_locks); | 
|  | 2724 |  |