| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1 | /* | 
 | 2 |  * kernel/lockdep.c | 
 | 3 |  * | 
 | 4 |  * Runtime locking correctness validator | 
 | 5 |  * | 
 | 6 |  * Started by Ingo Molnar: | 
 | 7 |  * | 
 | 8 |  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | 
 | 9 |  * | 
 | 10 |  * this code maps all the lock dependencies as they occur in a live kernel | 
 | 11 |  * and will warn about the following classes of locking bugs: | 
 | 12 |  * | 
 | 13 |  * - lock inversion scenarios | 
 | 14 |  * - circular lock dependencies | 
 | 15 |  * - hardirq/softirq safe/unsafe locking bugs | 
 | 16 |  * | 
 | 17 |  * Bugs are reported even if the current locking scenario does not cause | 
 | 18 |  * any deadlock at this point. | 
 | 19 |  * | 
 | 20 |  * I.e. if at any time in the past two locks were taken in a different order, | 
 | 21 |  * even if it happened for another task, even if those were different | 
 | 22 |  * locks (but of the same class as this lock), this code will detect it. | 
 | 23 |  * | 
 | 24 |  * Thanks to Arjan van de Ven for coming up with the initial idea of | 
 | 25 |  * mapping lock dependencies at runtime. | 
 | 26 |  */ | 
 | 27 | #include <linux/mutex.h> | 
 | 28 | #include <linux/sched.h> | 
 | 29 | #include <linux/delay.h> | 
 | 30 | #include <linux/module.h> | 
 | 31 | #include <linux/proc_fs.h> | 
 | 32 | #include <linux/seq_file.h> | 
 | 33 | #include <linux/spinlock.h> | 
 | 34 | #include <linux/kallsyms.h> | 
 | 35 | #include <linux/interrupt.h> | 
 | 36 | #include <linux/stacktrace.h> | 
 | 37 | #include <linux/debug_locks.h> | 
 | 38 | #include <linux/irqflags.h> | 
| Dave Jones | 99de055 | 2006-09-29 02:00:10 -0700 | [diff] [blame] | 39 | #include <linux/utsname.h> | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 40 |  | 
 | 41 | #include <asm/sections.h> | 
 | 42 |  | 
 | 43 | #include "lockdep_internals.h" | 
 | 44 |  | 
 | 45 | /* | 
 | 46 |  * hash_lock: protects the lockdep hashes and class/list/hash allocators. | 
 | 47 |  * | 
 | 48 |  * This is one of the rare exceptions where it's justified | 
 | 49 |  * to use a raw spinlock - we really don't want the spinlock | 
 | 50 |  * code to recurse back into the lockdep code. | 
 | 51 |  */ | 
 | 52 | static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 
 | 53 |  | 
 | 54 | static int lockdep_initialized; | 
 | 55 |  | 
 | 56 | unsigned long nr_list_entries; | 
 | 57 | static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; | 
 | 58 |  | 
 | 59 | /* | 
 | 60 |  * Allocate a lockdep entry. (assumes hash_lock is held, returns | 
 | 61 |  * NULL on failure) | 
 | 62 |  */ | 
 | 63 | static struct lock_list *alloc_list_entry(void) | 
 | 64 | { | 
 | 65 | 	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) { | 
 | 66 | 		__raw_spin_unlock(&hash_lock); | 
 | 67 | 		debug_locks_off(); | 
 | 68 | 		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n"); | 
 | 69 | 		printk("turning off the locking correctness validator.\n"); | 
 | 70 | 		return NULL; | 
 | 71 | 	} | 
 | 72 | 	return list_entries + nr_list_entries++; | 
 | 73 | } | 
 | 74 |  | 
 | 75 | /* | 
 | 76 |  * All data structures here are protected by the global debug_lock. | 
 | 77 |  * | 
 | 78 |  * Mutex key structs only get allocated once, during bootup, and never | 
 | 79 |  * get freed - this significantly simplifies the debugging code. | 
 | 80 |  */ | 
 | 81 | unsigned long nr_lock_classes; | 
 | 82 | static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; | 
 | 83 |  | 
 | 84 | /* | 
 | 85 |  * We keep a global list of all lock classes. The list only grows, | 
 | 86 |  * never shrinks. The list is only accessed with the lockdep | 
 | 87 |  * spinlock held. | 
 | 88 |  */ | 
 | 89 | LIST_HEAD(all_lock_classes); | 
 | 90 |  | 
 | 91 | /* | 
 | 92 |  * The lockdep classes are in a hash-table as well, for fast lookup: | 
 | 93 |  */ | 
 | 94 | #define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1) | 
 | 95 | #define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS) | 
 | 96 | #define CLASSHASH_MASK		(CLASSHASH_SIZE - 1) | 
 | 97 | #define __classhashfn(key)	((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK) | 
 | 98 | #define classhashentry(key)	(classhash_table + __classhashfn((key))) | 
 | 99 |  | 
 | 100 | static struct list_head classhash_table[CLASSHASH_SIZE]; | 
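/*
 * Sketch of a class lookup using the helpers above (this mirrors what
 * look_up_lock_class() does further down): hash the subclass key's
 * address and scan that bucket:
 *
 *	struct list_head *hash_head = classhashentry(key);
 *	struct lock_class *class;
 *
 *	list_for_each_entry(class, hash_head, hash_entry)
 *		if (class->key == key)
 *			return class;
 */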
 | 101 |  | 
 | 102 | unsigned long nr_lock_chains; | 
 | 103 | static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS]; | 
 | 104 |  | 
 | 105 | /* | 
 | 106 |  * We put the lock dependency chains into a hash-table as well, to cache | 
 | 107 |  * their existence: | 
 | 108 |  */ | 
 | 109 | #define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1) | 
 | 110 | #define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS) | 
 | 111 | #define CHAINHASH_MASK		(CHAINHASH_SIZE - 1) | 
 | 112 | #define __chainhashfn(chain) \ | 
 | 113 | 		(((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK) | 
 | 114 | #define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain))) | 
 | 115 |  | 
 | 116 | static struct list_head chainhash_table[CHAINHASH_SIZE]; | 
 | 117 |  | 
 | 118 | /* | 
 | 119 |  * The hash key of the lock dependency chains is a hash itself too: | 
 | 120 |  * it's a hash of all locks taken up to that lock, including that lock. | 
 | 121 |  * It's a 64-bit hash, because it's important for the keys to be | 
 | 122 |  * unique. | 
 | 123 |  */ | 
 | 124 | #define iterate_chain_key(key1, key2) \ | 
| Ingo Molnar | 03cbc35 | 2006-09-29 02:01:46 -0700 | [diff] [blame] | 125 | 	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \ | 
 | 126 | 	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \ | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 127 | 	(key2)) | 
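/*
 * Illustration (not used directly by the validator): the chain key for a
 * lock stack is built by folding in each held lock's class index in
 * order, so the same classes taken in a different order yield a
 * different key:
 *
 *	u64 chain_key = 0;
 *	chain_key = iterate_chain_key(chain_key, id_of_first_class);
 *	chain_key = iterate_chain_key(chain_key, id_of_second_class);
 *
 * where the ids are "class - lock_classes" indices; see check_chain_key()
 * below for the real, incremental construction.
 */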
 | 128 |  | 
 | 129 | void lockdep_off(void) | 
 | 130 | { | 
 | 131 | 	current->lockdep_recursion++; | 
 | 132 | } | 
 | 133 |  | 
 | 134 | EXPORT_SYMBOL(lockdep_off); | 
 | 135 |  | 
 | 136 | void lockdep_on(void) | 
 | 137 | { | 
 | 138 | 	current->lockdep_recursion--; | 
 | 139 | } | 
 | 140 |  | 
 | 141 | EXPORT_SYMBOL(lockdep_on); | 
 | 142 |  | 
 | 143 | int lockdep_internal(void) | 
 | 144 | { | 
 | 145 | 	return current->lockdep_recursion != 0; | 
 | 146 | } | 
 | 147 |  | 
 | 148 | EXPORT_SYMBOL(lockdep_internal); | 
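/*
 * Typical use of the recursion guard above (this is exactly what
 * save_trace() does below): code that lockdep itself may call, such as
 * a stack unwinder, is bracketed with lockdep_off()/lockdep_on() so the
 * locks it takes are not fed back into the validator:
 *
 *	lockdep_off();
 *	save_stack_trace(trace, NULL);
 *	lockdep_on();
 */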
 | 149 |  | 
 | 150 | /* | 
 | 151 |  * Debugging switches: | 
 | 152 |  */ | 
 | 153 |  | 
 | 154 | #define VERBOSE			0 | 
 | 155 | #ifdef VERBOSE | 
 | 156 | # define VERY_VERBOSE		0 | 
 | 157 | #endif | 
 | 158 |  | 
 | 159 | #if VERBOSE | 
 | 160 | # define HARDIRQ_VERBOSE	1 | 
 | 161 | # define SOFTIRQ_VERBOSE	1 | 
 | 162 | #else | 
 | 163 | # define HARDIRQ_VERBOSE	0 | 
 | 164 | # define SOFTIRQ_VERBOSE	0 | 
 | 165 | #endif | 
 | 166 |  | 
 | 167 | #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE | 
 | 168 | /* | 
 | 169 |  * Quick filtering for interesting events: | 
 | 170 |  */ | 
 | 171 | static int class_filter(struct lock_class *class) | 
 | 172 | { | 
| Andi Kleen | f9829cc | 2006-07-10 04:44:01 -0700 | [diff] [blame] | 173 | #if 0 | 
 | 174 | 	/* Example */ | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 175 | 	if (class->name_version == 1 && | 
| Andi Kleen | f9829cc | 2006-07-10 04:44:01 -0700 | [diff] [blame] | 176 | 			!strcmp(class->name, "lockname")) | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 177 | 		return 1; | 
 | 178 | 	if (class->name_version == 1 && | 
| Andi Kleen | f9829cc | 2006-07-10 04:44:01 -0700 | [diff] [blame] | 179 | 			!strcmp(class->name, "&struct->lockfield")) | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 180 | 		return 1; | 
| Andi Kleen | f9829cc | 2006-07-10 04:44:01 -0700 | [diff] [blame] | 181 | #endif | 
 | 182 | 	/* Allow everything else. Returning 0 here would filter everything out. */ | 
 | 183 | 	return 1; | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 184 | } | 
 | 185 | #endif | 
 | 186 |  | 
 | 187 | static int verbose(struct lock_class *class) | 
 | 188 | { | 
 | 189 | #if VERBOSE | 
 | 190 | 	return class_filter(class); | 
 | 191 | #endif | 
 | 192 | 	return 0; | 
 | 193 | } | 
 | 194 |  | 
 | 195 | #ifdef CONFIG_TRACE_IRQFLAGS | 
 | 196 |  | 
 | 197 | static int hardirq_verbose(struct lock_class *class) | 
 | 198 | { | 
 | 199 | #if HARDIRQ_VERBOSE | 
 | 200 | 	return class_filter(class); | 
 | 201 | #endif | 
 | 202 | 	return 0; | 
 | 203 | } | 
 | 204 |  | 
 | 205 | static int softirq_verbose(struct lock_class *class) | 
 | 206 | { | 
 | 207 | #if SOFTIRQ_VERBOSE | 
 | 208 | 	return class_filter(class); | 
 | 209 | #endif | 
 | 210 | 	return 0; | 
 | 211 | } | 
 | 212 |  | 
 | 213 | #endif | 
 | 214 |  | 
 | 215 | /* | 
 | 216 |  * Stack-trace: tightly packed array of stack backtrace | 
 | 217 |  * addresses. Protected by the hash_lock. | 
 | 218 |  */ | 
 | 219 | unsigned long nr_stack_trace_entries; | 
 | 220 | static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES]; | 
 | 221 |  | 
 | 222 | static int save_trace(struct stack_trace *trace) | 
 | 223 | { | 
 | 224 | 	trace->nr_entries = 0; | 
 | 225 | 	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; | 
 | 226 | 	trace->entries = stack_trace + nr_stack_trace_entries; | 
 | 227 |  | 
| Andi Kleen | 5a1b399 | 2006-09-26 10:52:34 +0200 | [diff] [blame] | 228 | 	trace->skip = 3; | 
 | 229 | 	trace->all_contexts = 0; | 
 | 230 |  | 
| Andi Kleen | 3fa7c79 | 2006-09-26 10:52:34 +0200 | [diff] [blame] | 231 | 	/* Make sure not to recurse in case the unwinder | 
 | 232 | 	   needs to take locks. */ | 
 | 233 | 	lockdep_off(); | 
| Andi Kleen | 5a1b399 | 2006-09-26 10:52:34 +0200 | [diff] [blame] | 234 | 	save_stack_trace(trace, NULL); | 
| Andi Kleen | 3fa7c79 | 2006-09-26 10:52:34 +0200 | [diff] [blame] | 235 | 	lockdep_on(); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 236 |  | 
 | 237 | 	trace->max_entries = trace->nr_entries; | 
 | 238 |  | 
 | 239 | 	nr_stack_trace_entries += trace->nr_entries; | 
 | 240 | 	if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES)) | 
 | 241 | 		return 0; | 
 | 242 |  | 
 | 243 | 	if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) { | 
 | 244 | 		__raw_spin_unlock(&hash_lock); | 
 | 245 | 		if (debug_locks_off()) { | 
 | 246 | 			printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n"); | 
 | 247 | 			printk("turning off the locking correctness validator.\n"); | 
 | 248 | 			dump_stack(); | 
 | 249 | 		} | 
 | 250 | 		return 0; | 
 | 251 | 	} | 
 | 252 |  | 
 | 253 | 	return 1; | 
 | 254 | } | 
 | 255 |  | 
 | 256 | unsigned int nr_hardirq_chains; | 
 | 257 | unsigned int nr_softirq_chains; | 
 | 258 | unsigned int nr_process_chains; | 
 | 259 | unsigned int max_lockdep_depth; | 
 | 260 | unsigned int max_recursion_depth; | 
 | 261 |  | 
 | 262 | #ifdef CONFIG_DEBUG_LOCKDEP | 
 | 263 | /* | 
 | 264 |  * We cannot printk in early bootup code - not even early_printk() | 
 | 265 |  * may work. So we mark any initialization errors and printk | 
 | 266 |  * about them later on, in lockdep_info(). | 
 | 267 |  */ | 
 | 268 | static int lockdep_init_error; | 
 | 269 |  | 
 | 270 | /* | 
 | 271 |  * Various lockdep statistics: | 
 | 272 |  */ | 
 | 273 | atomic_t chain_lookup_hits; | 
 | 274 | atomic_t chain_lookup_misses; | 
 | 275 | atomic_t hardirqs_on_events; | 
 | 276 | atomic_t hardirqs_off_events; | 
 | 277 | atomic_t redundant_hardirqs_on; | 
 | 278 | atomic_t redundant_hardirqs_off; | 
 | 279 | atomic_t softirqs_on_events; | 
 | 280 | atomic_t softirqs_off_events; | 
 | 281 | atomic_t redundant_softirqs_on; | 
 | 282 | atomic_t redundant_softirqs_off; | 
 | 283 | atomic_t nr_unused_locks; | 
 | 284 | atomic_t nr_cyclic_checks; | 
 | 285 | atomic_t nr_cyclic_check_recursions; | 
 | 286 | atomic_t nr_find_usage_forwards_checks; | 
 | 287 | atomic_t nr_find_usage_forwards_recursions; | 
 | 288 | atomic_t nr_find_usage_backwards_checks; | 
 | 289 | atomic_t nr_find_usage_backwards_recursions; | 
 | 290 | # define debug_atomic_inc(ptr)		atomic_inc(ptr) | 
 | 291 | # define debug_atomic_dec(ptr)		atomic_dec(ptr) | 
 | 292 | # define debug_atomic_read(ptr)		atomic_read(ptr) | 
 | 293 | #else | 
 | 294 | # define debug_atomic_inc(ptr)		do { } while (0) | 
 | 295 | # define debug_atomic_dec(ptr)		do { } while (0) | 
 | 296 | # define debug_atomic_read(ptr)		0 | 
 | 297 | #endif | 
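/*
 * Example: the statistics sites below use these wrappers, e.g.
 * debug_atomic_inc(&nr_cyclic_checks) in check_noncircular(), so all the
 * bookkeeping compiles away when CONFIG_DEBUG_LOCKDEP is not set.
 */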
 | 298 |  | 
 | 299 | /* | 
 | 300 |  * Locking printouts: | 
 | 301 |  */ | 
 | 302 |  | 
 | 303 | static const char *usage_str[] = | 
 | 304 | { | 
 | 305 | 	[LOCK_USED] =			"initial-use ", | 
 | 306 | 	[LOCK_USED_IN_HARDIRQ] =	"in-hardirq-W", | 
 | 307 | 	[LOCK_USED_IN_SOFTIRQ] =	"in-softirq-W", | 
 | 308 | 	[LOCK_ENABLED_SOFTIRQS] =	"softirq-on-W", | 
 | 309 | 	[LOCK_ENABLED_HARDIRQS] =	"hardirq-on-W", | 
 | 310 | 	[LOCK_USED_IN_HARDIRQ_READ] =	"in-hardirq-R", | 
 | 311 | 	[LOCK_USED_IN_SOFTIRQ_READ] =	"in-softirq-R", | 
 | 312 | 	[LOCK_ENABLED_SOFTIRQS_READ] =	"softirq-on-R", | 
 | 313 | 	[LOCK_ENABLED_HARDIRQS_READ] =	"hardirq-on-R", | 
 | 314 | }; | 
 | 315 |  | 
 | 316 | const char * __get_key_name(struct lockdep_subclass_key *key, char *str) | 
 | 317 | { | 
 | 318 | 	unsigned long offs, size; | 
 | 319 | 	char *modname; | 
 | 320 |  | 
 | 321 | 	return kallsyms_lookup((unsigned long)key, &size, &offs, &modname, str); | 
 | 322 | } | 
 | 323 |  | 
 | 324 | void | 
 | 325 | get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4) | 
 | 326 | { | 
 | 327 | 	*c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.'; | 
 | 328 |  | 
 | 329 | 	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ) | 
 | 330 | 		*c1 = '+'; | 
 | 331 | 	else | 
 | 332 | 		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS) | 
 | 333 | 			*c1 = '-'; | 
 | 334 |  | 
 | 335 | 	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ) | 
 | 336 | 		*c2 = '+'; | 
 | 337 | 	else | 
 | 338 | 		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS) | 
 | 339 | 			*c2 = '-'; | 
 | 340 |  | 
 | 341 | 	if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) | 
 | 342 | 		*c3 = '-'; | 
 | 343 | 	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) { | 
 | 344 | 		*c3 = '+'; | 
 | 345 | 		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) | 
 | 346 | 			*c3 = '?'; | 
 | 347 | 	} | 
 | 348 |  | 
 | 349 | 	if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ) | 
 | 350 | 		*c4 = '-'; | 
 | 351 | 	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) { | 
 | 352 | 		*c4 = '+'; | 
 | 353 | 		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ) | 
 | 354 | 			*c4 = '?'; | 
 | 355 | 	} | 
 | 356 | } | 
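/*
 * For example, a lock class that has been acquired in hardirq context
 * and has also been acquired with softirqs enabled (but never in softirq
 * context and never as a read-lock) prints as {+-..}: '+' means used in
 * that irq context, '-' means acquired with that irq type enabled,
 * '.' means neither, and the read-side columns additionally use '?' when
 * both apply.
 */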
 | 357 |  | 
 | 358 | static void print_lock_name(struct lock_class *class) | 
 | 359 | { | 
 | 360 | 	char str[128], c1, c2, c3, c4; | 
 | 361 | 	const char *name; | 
 | 362 |  | 
 | 363 | 	get_usage_chars(class, &c1, &c2, &c3, &c4); | 
 | 364 |  | 
 | 365 | 	name = class->name; | 
 | 366 | 	if (!name) { | 
 | 367 | 		name = __get_key_name(class->key, str); | 
 | 368 | 		printk(" (%s", name); | 
 | 369 | 	} else { | 
 | 370 | 		printk(" (%s", name); | 
 | 371 | 		if (class->name_version > 1) | 
 | 372 | 			printk("#%d", class->name_version); | 
 | 373 | 		if (class->subclass) | 
 | 374 | 			printk("/%d", class->subclass); | 
 | 375 | 	} | 
 | 376 | 	printk("){%c%c%c%c}", c1, c2, c3, c4); | 
 | 377 | } | 
 | 378 |  | 
 | 379 | static void print_lockdep_cache(struct lockdep_map *lock) | 
 | 380 | { | 
 | 381 | 	const char *name; | 
 | 382 | 	char str[128]; | 
 | 383 |  | 
 | 384 | 	name = lock->name; | 
 | 385 | 	if (!name) | 
 | 386 | 		name = __get_key_name(lock->key->subkeys, str); | 
 | 387 |  | 
 | 388 | 	printk("%s", name); | 
 | 389 | } | 
 | 390 |  | 
 | 391 | static void print_lock(struct held_lock *hlock) | 
 | 392 | { | 
 | 393 | 	print_lock_name(hlock->class); | 
 | 394 | 	printk(", at: "); | 
 | 395 | 	print_ip_sym(hlock->acquire_ip); | 
 | 396 | } | 
 | 397 |  | 
 | 398 | static void lockdep_print_held_locks(struct task_struct *curr) | 
 | 399 | { | 
 | 400 | 	int i, depth = curr->lockdep_depth; | 
 | 401 |  | 
 | 402 | 	if (!depth) { | 
 | 403 | 		printk("no locks held by %s/%d.\n", curr->comm, curr->pid); | 
 | 404 | 		return; | 
 | 405 | 	} | 
 | 406 | 	printk("%d lock%s held by %s/%d:\n", | 
 | 407 | 		depth, depth > 1 ? "s" : "", curr->comm, curr->pid); | 
 | 408 |  | 
 | 409 | 	for (i = 0; i < depth; i++) { | 
 | 410 | 		printk(" #%d: ", i); | 
 | 411 | 		print_lock(curr->held_locks + i); | 
 | 412 | 	} | 
 | 413 | } | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 414 |  | 
 | 415 | static void print_lock_class_header(struct lock_class *class, int depth) | 
 | 416 | { | 
 | 417 | 	int bit; | 
 | 418 |  | 
| Andi Kleen | f9829cc | 2006-07-10 04:44:01 -0700 | [diff] [blame] | 419 | 	printk("%*s->", depth, ""); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 420 | 	print_lock_name(class); | 
 | 421 | 	printk(" ops: %lu", class->ops); | 
 | 422 | 	printk(" {\n"); | 
 | 423 |  | 
 | 424 | 	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) { | 
 | 425 | 		if (class->usage_mask & (1 << bit)) { | 
 | 426 | 			int len = depth; | 
 | 427 |  | 
| Andi Kleen | f9829cc | 2006-07-10 04:44:01 -0700 | [diff] [blame] | 428 | 			len += printk("%*s   %s", depth, "", usage_str[bit]); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 429 | 			len += printk(" at:\n"); | 
 | 430 | 			print_stack_trace(class->usage_traces + bit, len); | 
 | 431 | 		} | 
 | 432 | 	} | 
| Andi Kleen | f9829cc | 2006-07-10 04:44:01 -0700 | [diff] [blame] | 433 | 	printk("%*s }\n", depth, ""); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 434 |  | 
| Andi Kleen | f9829cc | 2006-07-10 04:44:01 -0700 | [diff] [blame] | 435 | 	printk("%*s ... key      at: ",depth,""); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 436 | 	print_ip_sym((unsigned long)class->key); | 
 | 437 | } | 
 | 438 |  | 
 | 439 | /* | 
 | 440 |  * printk all lock dependencies starting at <entry>: | 
 | 441 |  */ | 
 | 442 | static void print_lock_dependencies(struct lock_class *class, int depth) | 
 | 443 | { | 
 | 444 | 	struct lock_list *entry; | 
 | 445 |  | 
 | 446 | 	if (DEBUG_LOCKS_WARN_ON(depth >= 20)) | 
 | 447 | 		return; | 
 | 448 |  | 
 | 449 | 	print_lock_class_header(class, depth); | 
 | 450 |  | 
 | 451 | 	list_for_each_entry(entry, &class->locks_after, entry) { | 
 | 452 | 		DEBUG_LOCKS_WARN_ON(!entry->class); | 
 | 453 | 		print_lock_dependencies(entry->class, depth + 1); | 
 | 454 |  | 
| Andi Kleen | f9829cc | 2006-07-10 04:44:01 -0700 | [diff] [blame] | 455 | 		printk("%*s ... acquired at:\n",depth,""); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 456 | 		print_stack_trace(&entry->trace, 2); | 
 | 457 | 		printk("\n"); | 
 | 458 | 	} | 
 | 459 | } | 
 | 460 |  | 
 | 461 | /* | 
 | 462 |  * Add a new dependency to the head of the list: | 
 | 463 |  */ | 
 | 464 | static int add_lock_to_list(struct lock_class *class, struct lock_class *this, | 
 | 465 | 			    struct list_head *head, unsigned long ip) | 
 | 466 | { | 
 | 467 | 	struct lock_list *entry; | 
 | 468 | 	/* | 
 | 469 | 	 * Lock not present yet - get a new dependency struct and | 
 | 470 | 	 * add it to the list: | 
 | 471 | 	 */ | 
 | 472 | 	entry = alloc_list_entry(); | 
 | 473 | 	if (!entry) | 
 | 474 | 		return 0; | 
 | 475 |  | 
 | 476 | 	entry->class = this; | 
 | 477 | 	save_trace(&entry->trace); | 
 | 478 |  | 
 | 479 | 	/* | 
 | 480 | 	 * Since we never remove from the dependency list, the list can | 
 | 481 | 	 * be walked lockless by other CPUs; it's only allocation | 
 | 482 | 	 * that must be protected by the spinlock. But this also means | 
 | 483 | 	 * we must make new entries visible only once writes to the | 
 | 484 | 	 * entry become visible - hence the RCU op: | 
 | 485 | 	 */ | 
 | 486 | 	list_add_tail_rcu(&entry->entry, head); | 
 | 487 |  | 
 | 488 | 	return 1; | 
 | 489 | } | 
 | 490 |  | 
 | 491 | /* | 
 | 492 |  * Recursive, forwards-direction lock-dependency checking, used for | 
 | 493 |  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe | 
 | 494 |  * checking. | 
 | 495 |  * | 
 | 496 |  * (to keep the stackframe of the recursive functions small we | 
 | 497 |  *  use these global variables, and we also mark various helper | 
 | 498 |  *  functions as noinline.) | 
 | 499 |  */ | 
 | 500 | static struct held_lock *check_source, *check_target; | 
 | 501 |  | 
 | 502 | /* | 
 | 503 |  * Print a dependency chain entry (this is only done when a deadlock | 
 | 504 |  * has been detected): | 
 | 505 |  */ | 
 | 506 | static noinline int | 
 | 507 | print_circular_bug_entry(struct lock_list *target, unsigned int depth) | 
 | 508 | { | 
 | 509 | 	if (debug_locks_silent) | 
 | 510 | 		return 0; | 
 | 511 | 	printk("\n-> #%u", depth); | 
 | 512 | 	print_lock_name(target->class); | 
 | 513 | 	printk(":\n"); | 
 | 514 | 	print_stack_trace(&target->trace, 6); | 
 | 515 |  | 
 | 516 | 	return 0; | 
 | 517 | } | 
 | 518 |  | 
| Dave Jones | 99de055 | 2006-09-29 02:00:10 -0700 | [diff] [blame] | 519 | static void print_kernel_version(void) | 
 | 520 | { | 
| Serge E. Hallyn | 96b644b | 2006-10-02 02:18:13 -0700 | [diff] [blame] | 521 | 	printk("%s %.*s\n", init_utsname()->release, | 
 | 522 | 		(int)strcspn(init_utsname()->version, " "), | 
 | 523 | 		init_utsname()->version); | 
| Dave Jones | 99de055 | 2006-09-29 02:00:10 -0700 | [diff] [blame] | 524 | } | 
 | 525 |  | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 526 | /* | 
 | 527 |  * When a circular dependency is detected, print the | 
 | 528 |  * header first: | 
 | 529 |  */ | 
 | 530 | static noinline int | 
 | 531 | print_circular_bug_header(struct lock_list *entry, unsigned int depth) | 
 | 532 | { | 
 | 533 | 	struct task_struct *curr = current; | 
 | 534 |  | 
 | 535 | 	__raw_spin_unlock(&hash_lock); | 
 | 536 | 	debug_locks_off(); | 
 | 537 | 	if (debug_locks_silent) | 
 | 538 | 		return 0; | 
 | 539 |  | 
 | 540 | 	printk("\n=======================================================\n"); | 
 | 541 | 	printk(  "[ INFO: possible circular locking dependency detected ]\n"); | 
| Dave Jones | 99de055 | 2006-09-29 02:00:10 -0700 | [diff] [blame] | 542 | 	print_kernel_version(); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 543 | 	printk(  "-------------------------------------------------------\n"); | 
 | 544 | 	printk("%s/%d is trying to acquire lock:\n", | 
 | 545 | 		curr->comm, curr->pid); | 
 | 546 | 	print_lock(check_source); | 
 | 547 | 	printk("\nbut task is already holding lock:\n"); | 
 | 548 | 	print_lock(check_target); | 
 | 549 | 	printk("\nwhich lock already depends on the new lock.\n\n"); | 
 | 550 | 	printk("\nthe existing dependency chain (in reverse order) is:\n"); | 
 | 551 |  | 
 | 552 | 	print_circular_bug_entry(entry, depth); | 
 | 553 |  | 
 | 554 | 	return 0; | 
 | 555 | } | 
 | 556 |  | 
 | 557 | static noinline int print_circular_bug_tail(void) | 
 | 558 | { | 
 | 559 | 	struct task_struct *curr = current; | 
 | 560 | 	struct lock_list this; | 
 | 561 |  | 
 | 562 | 	if (debug_locks_silent) | 
 | 563 | 		return 0; | 
 | 564 |  | 
 | 565 | 	this.class = check_source->class; | 
 | 566 | 	save_trace(&this.trace); | 
 | 567 | 	print_circular_bug_entry(&this, 0); | 
 | 568 |  | 
 | 569 | 	printk("\nother info that might help us debug this:\n\n"); | 
 | 570 | 	lockdep_print_held_locks(curr); | 
 | 571 |  | 
 | 572 | 	printk("\nstack backtrace:\n"); | 
 | 573 | 	dump_stack(); | 
 | 574 |  | 
 | 575 | 	return 0; | 
 | 576 | } | 
 | 577 |  | 
| Ingo Molnar | ca268c6 | 2006-10-17 00:09:28 -0700 | [diff] [blame] | 578 | #define RECURSION_LIMIT 40 | 
 | 579 |  | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 580 | static int noinline print_infinite_recursion_bug(void) | 
 | 581 | { | 
 | 582 | 	__raw_spin_unlock(&hash_lock); | 
 | 583 | 	DEBUG_LOCKS_WARN_ON(1); | 
 | 584 |  | 
 | 585 | 	return 0; | 
 | 586 | } | 
 | 587 |  | 
 | 588 | /* | 
 | 589 |  * Prove that the dependency graph starting at <source> cannot | 
 | 590 |  * lead to <target>. Print an error and return 0 if it does. | 
 | 591 |  */ | 
 | 592 | static noinline int | 
 | 593 | check_noncircular(struct lock_class *source, unsigned int depth) | 
 | 594 | { | 
 | 595 | 	struct lock_list *entry; | 
 | 596 |  | 
 | 597 | 	debug_atomic_inc(&nr_cyclic_check_recursions); | 
 | 598 | 	if (depth > max_recursion_depth) | 
 | 599 | 		max_recursion_depth = depth; | 
| Ingo Molnar | ca268c6 | 2006-10-17 00:09:28 -0700 | [diff] [blame] | 600 | 	if (depth >= RECURSION_LIMIT) | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 601 | 		return print_infinite_recursion_bug(); | 
 | 602 | 	/* | 
 | 603 | 	 * Check this lock's dependency list: | 
 | 604 | 	 */ | 
 | 605 | 	list_for_each_entry(entry, &source->locks_after, entry) { | 
 | 606 | 		if (entry->class == check_target->class) | 
 | 607 | 			return print_circular_bug_header(entry, depth+1); | 
 | 608 | 		debug_atomic_inc(&nr_cyclic_checks); | 
 | 609 | 		if (!check_noncircular(entry->class, depth+1)) | 
 | 610 | 			return print_circular_bug_entry(entry, depth+1); | 
 | 611 | 	} | 
 | 612 | 	return 1; | 
 | 613 | } | 
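/*
 * Concrete example of the cycle check above: if class A was ever taken
 * before class B (so B is on A's locks_after list) and a task now
 * acquires A while holding B, then check_source is A and check_target
 * is B; the walk starting at A immediately reaches B and
 * print_circular_bug_header() reports the AB-BA inversion.
 */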
 | 614 |  | 
 | 615 | static int very_verbose(struct lock_class *class) | 
 | 616 | { | 
 | 617 | #if VERY_VERBOSE | 
 | 618 | 	return class_filter(class); | 
 | 619 | #endif | 
 | 620 | 	return 0; | 
 | 621 | } | 
 | 622 | #ifdef CONFIG_TRACE_IRQFLAGS | 
 | 623 |  | 
 | 624 | /* | 
 | 625 |  * Forwards and backwards subgraph searching, for the purposes of | 
 | 626 |  * proving that two subgraphs can be connected by a new dependency | 
 | 627 |  * without creating any illegal irq-safe -> irq-unsafe lock dependency. | 
 | 628 |  */ | 
 | 629 | static enum lock_usage_bit find_usage_bit; | 
 | 630 | static struct lock_class *forwards_match, *backwards_match; | 
 | 631 |  | 
 | 632 | /* | 
 | 633 |  * Find a node in the forwards-direction dependency sub-graph starting | 
 | 634 |  * at <source> that matches <find_usage_bit>. | 
 | 635 |  * | 
 | 636 |  * Return 2 if such a node exists in the subgraph, and put that node | 
 | 637 |  * into <forwards_match>. | 
 | 638 |  * | 
 | 639 |  * Return 1 otherwise and keep <forwards_match> unchanged. | 
 | 640 |  * Return 0 on error. | 
 | 641 |  */ | 
 | 642 | static noinline int | 
 | 643 | find_usage_forwards(struct lock_class *source, unsigned int depth) | 
 | 644 | { | 
 | 645 | 	struct lock_list *entry; | 
 | 646 | 	int ret; | 
 | 647 |  | 
 | 648 | 	if (depth > max_recursion_depth) | 
 | 649 | 		max_recursion_depth = depth; | 
| Ingo Molnar | ca268c6 | 2006-10-17 00:09:28 -0700 | [diff] [blame] | 650 | 	if (depth >= RECURSION_LIMIT) | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 651 | 		return print_infinite_recursion_bug(); | 
 | 652 |  | 
 | 653 | 	debug_atomic_inc(&nr_find_usage_forwards_checks); | 
 | 654 | 	if (source->usage_mask & (1 << find_usage_bit)) { | 
 | 655 | 		forwards_match = source; | 
 | 656 | 		return 2; | 
 | 657 | 	} | 
 | 658 |  | 
 | 659 | 	/* | 
 | 660 | 	 * Check this lock's dependency list: | 
 | 661 | 	 */ | 
 | 662 | 	list_for_each_entry(entry, &source->locks_after, entry) { | 
 | 663 | 		debug_atomic_inc(&nr_find_usage_forwards_recursions); | 
 | 664 | 		ret = find_usage_forwards(entry->class, depth+1); | 
 | 665 | 		if (ret == 2 || ret == 0) | 
 | 666 | 			return ret; | 
 | 667 | 	} | 
 | 668 | 	return 1; | 
 | 669 | } | 
 | 670 |  | 
 | 671 | /* | 
 | 672 |  * Find a node in the backwards-direction dependency sub-graph starting | 
 | 673 |  * at <source> that matches <find_usage_bit>. | 
 | 674 |  * | 
 | 675 |  * Return 2 if such a node exists in the subgraph, and put that node | 
 | 676 |  * into <backwards_match>. | 
 | 677 |  * | 
 | 678 |  * Return 1 otherwise and keep <backwards_match> unchanged. | 
 | 679 |  * Return 0 on error. | 
 | 680 |  */ | 
 | 681 | static noinline int | 
 | 682 | find_usage_backwards(struct lock_class *source, unsigned int depth) | 
 | 683 | { | 
 | 684 | 	struct lock_list *entry; | 
 | 685 | 	int ret; | 
 | 686 |  | 
 | 687 | 	if (depth > max_recursion_depth) | 
 | 688 | 		max_recursion_depth = depth; | 
| Ingo Molnar | ca268c6 | 2006-10-17 00:09:28 -0700 | [diff] [blame] | 689 | 	if (depth >= RECURSION_LIMIT) | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 690 | 		return print_infinite_recursion_bug(); | 
 | 691 |  | 
 | 692 | 	debug_atomic_inc(&nr_find_usage_backwards_checks); | 
 | 693 | 	if (source->usage_mask & (1 << find_usage_bit)) { | 
 | 694 | 		backwards_match = source; | 
 | 695 | 		return 2; | 
 | 696 | 	} | 
 | 697 |  | 
 | 698 | 	/* | 
 | 699 | 	 * Check this lock's dependency list: | 
 | 700 | 	 */ | 
 | 701 | 	list_for_each_entry(entry, &source->locks_before, entry) { | 
 | 702 | 		debug_atomic_inc(&nr_find_usage_backwards_recursions); | 
 | 703 | 		ret = find_usage_backwards(entry->class, depth+1); | 
 | 704 | 		if (ret == 2 || ret == 0) | 
 | 705 | 			return ret; | 
 | 706 | 	} | 
 | 707 | 	return 1; | 
 | 708 | } | 
 | 709 |  | 
 | 710 | static int | 
 | 711 | print_bad_irq_dependency(struct task_struct *curr, | 
 | 712 | 			 struct held_lock *prev, | 
 | 713 | 			 struct held_lock *next, | 
 | 714 | 			 enum lock_usage_bit bit1, | 
 | 715 | 			 enum lock_usage_bit bit2, | 
 | 716 | 			 const char *irqclass) | 
 | 717 | { | 
 | 718 | 	__raw_spin_unlock(&hash_lock); | 
 | 719 | 	debug_locks_off(); | 
 | 720 | 	if (debug_locks_silent) | 
 | 721 | 		return 0; | 
 | 722 |  | 
 | 723 | 	printk("\n======================================================\n"); | 
 | 724 | 	printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n", | 
 | 725 | 		irqclass, irqclass); | 
| Dave Jones | 99de055 | 2006-09-29 02:00:10 -0700 | [diff] [blame] | 726 | 	print_kernel_version(); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 727 | 	printk(  "------------------------------------------------------\n"); | 
 | 728 | 	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", | 
 | 729 | 		curr->comm, curr->pid, | 
 | 730 | 		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, | 
 | 731 | 		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, | 
 | 732 | 		curr->hardirqs_enabled, | 
 | 733 | 		curr->softirqs_enabled); | 
 | 734 | 	print_lock(next); | 
 | 735 |  | 
 | 736 | 	printk("\nand this task is already holding:\n"); | 
 | 737 | 	print_lock(prev); | 
 | 738 | 	printk("which would create a new lock dependency:\n"); | 
 | 739 | 	print_lock_name(prev->class); | 
 | 740 | 	printk(" ->"); | 
 | 741 | 	print_lock_name(next->class); | 
 | 742 | 	printk("\n"); | 
 | 743 |  | 
 | 744 | 	printk("\nbut this new dependency connects a %s-irq-safe lock:\n", | 
 | 745 | 		irqclass); | 
 | 746 | 	print_lock_name(backwards_match); | 
 | 747 | 	printk("\n... which became %s-irq-safe at:\n", irqclass); | 
 | 748 |  | 
 | 749 | 	print_stack_trace(backwards_match->usage_traces + bit1, 1); | 
 | 750 |  | 
 | 751 | 	printk("\nto a %s-irq-unsafe lock:\n", irqclass); | 
 | 752 | 	print_lock_name(forwards_match); | 
 | 753 | 	printk("\n... which became %s-irq-unsafe at:\n", irqclass); | 
 | 754 | 	printk("..."); | 
 | 755 |  | 
 | 756 | 	print_stack_trace(forwards_match->usage_traces + bit2, 1); | 
 | 757 |  | 
 | 758 | 	printk("\nother info that might help us debug this:\n\n"); | 
 | 759 | 	lockdep_print_held_locks(curr); | 
 | 760 |  | 
 | 761 | 	printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass); | 
 | 762 | 	print_lock_dependencies(backwards_match, 0); | 
 | 763 |  | 
 | 764 | 	printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass); | 
 | 765 | 	print_lock_dependencies(forwards_match, 0); | 
 | 766 |  | 
 | 767 | 	printk("\nstack backtrace:\n"); | 
 | 768 | 	dump_stack(); | 
 | 769 |  | 
 | 770 | 	return 0; | 
 | 771 | } | 
 | 772 |  | 
 | 773 | static int | 
 | 774 | check_usage(struct task_struct *curr, struct held_lock *prev, | 
 | 775 | 	    struct held_lock *next, enum lock_usage_bit bit_backwards, | 
 | 776 | 	    enum lock_usage_bit bit_forwards, const char *irqclass) | 
 | 777 | { | 
 | 778 | 	int ret; | 
 | 779 |  | 
 | 780 | 	find_usage_bit = bit_backwards; | 
 | 781 | 	/* fills in <backwards_match> */ | 
 | 782 | 	ret = find_usage_backwards(prev->class, 0); | 
 | 783 | 	if (!ret || ret == 1) | 
 | 784 | 		return ret; | 
 | 785 |  | 
 | 786 | 	find_usage_bit = bit_forwards; | 
 | 787 | 	ret = find_usage_forwards(next->class, 0); | 
 | 788 | 	if (!ret || ret == 1) | 
 | 789 | 		return ret; | 
 | 790 | 	/* ret == 2 */ | 
 | 791 | 	return print_bad_irq_dependency(curr, prev, next, | 
 | 792 | 			bit_backwards, bit_forwards, irqclass); | 
 | 793 | } | 
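/*
 * Informal example of what check_usage() above catches: lock A is
 * hardirq-safe (taken from hardirq context), lock B is hardirq-unsafe
 * (taken with hardirqs enabled). If an A -> B dependency is ever
 * recorded, one CPU can hold A and wait for B while another CPU holds B
 * with irqs enabled and takes an interrupt whose handler spins on A -
 * a deadlock - so print_bad_irq_dependency() is triggered instead.
 */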
 | 794 |  | 
 | 795 | #endif | 
 | 796 |  | 
 | 797 | static int | 
 | 798 | print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, | 
 | 799 | 		   struct held_lock *next) | 
 | 800 | { | 
 | 801 | 	debug_locks_off(); | 
 | 802 | 	__raw_spin_unlock(&hash_lock); | 
 | 803 | 	if (debug_locks_silent) | 
 | 804 | 		return 0; | 
 | 805 |  | 
 | 806 | 	printk("\n=============================================\n"); | 
 | 807 | 	printk(  "[ INFO: possible recursive locking detected ]\n"); | 
| Dave Jones | 99de055 | 2006-09-29 02:00:10 -0700 | [diff] [blame] | 808 | 	print_kernel_version(); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 809 | 	printk(  "---------------------------------------------\n"); | 
 | 810 | 	printk("%s/%d is trying to acquire lock:\n", | 
 | 811 | 		curr->comm, curr->pid); | 
 | 812 | 	print_lock(next); | 
 | 813 | 	printk("\nbut task is already holding lock:\n"); | 
 | 814 | 	print_lock(prev); | 
 | 815 |  | 
 | 816 | 	printk("\nother info that might help us debug this:\n"); | 
 | 817 | 	lockdep_print_held_locks(curr); | 
 | 818 |  | 
 | 819 | 	printk("\nstack backtrace:\n"); | 
 | 820 | 	dump_stack(); | 
 | 821 |  | 
 | 822 | 	return 0; | 
 | 823 | } | 
 | 824 |  | 
 | 825 | /* | 
 | 826 |  * Check whether we are holding such a class already. | 
 | 827 |  * | 
 | 828 |  * (Note that this has to be done separately, because the graph cannot | 
 | 829 |  * detect such classes of deadlocks.) | 
 | 830 |  * | 
 | 831 |  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read | 
 | 832 |  */ | 
 | 833 | static int | 
 | 834 | check_deadlock(struct task_struct *curr, struct held_lock *next, | 
 | 835 | 	       struct lockdep_map *next_instance, int read) | 
 | 836 | { | 
 | 837 | 	struct held_lock *prev; | 
 | 838 | 	int i; | 
 | 839 |  | 
 | 840 | 	for (i = 0; i < curr->lockdep_depth; i++) { | 
 | 841 | 		prev = curr->held_locks + i; | 
 | 842 | 		if (prev->class != next->class) | 
 | 843 | 			continue; | 
 | 844 | 		/* | 
 | 845 | 		 * Allow read-after-read recursion of the same | 
| Ingo Molnar | 6c9076e | 2006-07-03 00:24:51 -0700 | [diff] [blame] | 846 | 		 * lock class (i.e. read_lock(lock)+read_lock(lock)): | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 847 | 		 */ | 
| Ingo Molnar | 6c9076e | 2006-07-03 00:24:51 -0700 | [diff] [blame] | 848 | 		if ((read == 2) && prev->read) | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 849 | 			return 2; | 
 | 850 | 		return print_deadlock_bug(curr, prev, next); | 
 | 851 | 	} | 
 | 852 | 	return 1; | 
 | 853 | } | 
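/*
 * Example of what check_deadlock() catches: taking the same lock class
 * twice in one context, e.g.
 *
 *	spin_lock(&lock);
 *	spin_lock(&lock);	<- same class already held: deadlock report
 *
 * The only exception is read-after-read recursion on recursive read
 * locks, which returns 2 instead of reporting a bug.
 */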
 | 854 |  | 
 | 855 | /* | 
 | 856 |  * There was a chain-cache miss, and we are about to add a new dependency | 
 | 857 |  * to a previous lock. We recursively validate the following rules: | 
 | 858 |  * | 
 | 859 |  *  - would the adding of the <prev> -> <next> dependency create a | 
 | 860 |  *    circular dependency in the graph? [== circular deadlock] | 
 | 861 |  * | 
 | 862 |  *  - does the new prev->next dependency connect any hardirq-safe lock | 
 | 863 |  *    (in the full backwards-subgraph starting at <prev>) with any | 
 | 864 |  *    hardirq-unsafe lock (in the full forwards-subgraph starting at | 
 | 865 |  *    <next>)? [== illegal lock inversion with hardirq contexts] | 
 | 866 |  * | 
 | 867 |  *  - does the new prev->next dependency connect any softirq-safe lock | 
 | 868 |  *    (in the full backwards-subgraph starting at <prev>) with any | 
 | 869 |  *    softirq-unsafe lock (in the full forwards-subgraph starting at | 
 | 870 |  *    <next>)? [== illegal lock inversion with softirq contexts] | 
 | 871 |  * | 
 | 872 |  * any of these scenarios could lead to a deadlock. | 
 | 873 |  * | 
 | 874 |  * Then if all the validations pass, we add the forwards and backwards | 
 | 875 |  * dependency. | 
 | 876 |  */ | 
 | 877 | static int | 
 | 878 | check_prev_add(struct task_struct *curr, struct held_lock *prev, | 
 | 879 | 	       struct held_lock *next) | 
 | 880 | { | 
 | 881 | 	struct lock_list *entry; | 
 | 882 | 	int ret; | 
 | 883 |  | 
 | 884 | 	/* | 
 | 885 | 	 * Prove that the new <prev> -> <next> dependency would not | 
 | 886 | 	 * create a circular dependency in the graph. (We do this by | 
 | 887 | 	 * forward-recursing into the graph starting at <next>, and | 
 | 888 | 	 * checking whether we can reach <prev>.) | 
 | 889 | 	 * | 
 | 890 | 	 * We are using global variables to control the recursion, to | 
 | 891 | 	 * keep the stackframe size of the recursive functions low: | 
 | 892 | 	 */ | 
 | 893 | 	check_source = next; | 
 | 894 | 	check_target = prev; | 
 | 895 | 	if (!(check_noncircular(next->class, 0))) | 
 | 896 | 		return print_circular_bug_tail(); | 
 | 897 |  | 
 | 898 | #ifdef CONFIG_TRACE_IRQFLAGS | 
 | 899 | 	/* | 
 | 900 | 	 * Prove that the new dependency does not connect a hardirq-safe | 
 | 901 | 	 * lock with a hardirq-unsafe lock - to achieve this we search | 
 | 902 | 	 * the backwards-subgraph starting at <prev>, and the | 
 | 903 | 	 * forwards-subgraph starting at <next>: | 
 | 904 | 	 */ | 
 | 905 | 	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ, | 
 | 906 | 					LOCK_ENABLED_HARDIRQS, "hard")) | 
 | 907 | 		return 0; | 
 | 908 |  | 
 | 909 | 	/* | 
 | 910 | 	 * Prove that the new dependency does not connect a hardirq-safe-read | 
 | 911 | 	 * lock with a hardirq-unsafe lock - to achieve this we search | 
 | 912 | 	 * the backwards-subgraph starting at <prev>, and the | 
 | 913 | 	 * forwards-subgraph starting at <next>: | 
 | 914 | 	 */ | 
 | 915 | 	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ, | 
 | 916 | 					LOCK_ENABLED_HARDIRQS, "hard-read")) | 
 | 917 | 		return 0; | 
 | 918 |  | 
 | 919 | 	/* | 
 | 920 | 	 * Prove that the new dependency does not connect a softirq-safe | 
 | 921 | 	 * lock with a softirq-unsafe lock - to achieve this we search | 
 | 922 | 	 * the backwards-subgraph starting at <prev>, and the | 
 | 923 | 	 * forwards-subgraph starting at <next>: | 
 | 924 | 	 */ | 
 | 925 | 	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ, | 
 | 926 | 					LOCK_ENABLED_SOFTIRQS, "soft")) | 
 | 927 | 		return 0; | 
 | 928 | 	/* | 
 | 929 | 	 * Prove that the new dependency does not connect a softirq-safe-read | 
 | 930 | 	 * lock with a softirq-unsafe lock - to achieve this we search | 
 | 931 | 	 * the backwards-subgraph starting at <prev>, and the | 
 | 932 | 	 * forwards-subgraph starting at <next>: | 
 | 933 | 	 */ | 
 | 934 | 	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ, | 
 | 935 | 					LOCK_ENABLED_SOFTIRQS, "soft-read")) | 
 | 936 | 		return 0; | 
 | 937 | #endif | 
 | 938 | 	/* | 
 | 939 | 	 * For recursive read-locks we do all the dependency checks, | 
 | 940 | 	 * but we don't store read-triggered dependencies (only | 
 | 941 | 	 * write-triggered dependencies). This ensures that only the | 
 | 942 | 	 * write-side dependencies matter, and that if for example a | 
 | 943 | 	 * write-lock never takes any other locks, then the reads are | 
 | 944 | 	 * equivalent to a NOP. | 
 | 945 | 	 */ | 
 | 946 | 	if (next->read == 2 || prev->read == 2) | 
 | 947 | 		return 1; | 
 | 948 | 	/* | 
 | 949 | 	 * Is the <prev> -> <next> dependency already present? | 
 | 950 | 	 * | 
 | 951 | 	 * (this may occur even though this is a new chain: consider | 
 | 952 | 	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3 | 
 | 953 | 	 *  chains - the second one will be new, but L1 already has | 
 | 954 | 	 *  L2 added to its dependency list, due to the first chain.) | 
 | 955 | 	 */ | 
 | 956 | 	list_for_each_entry(entry, &prev->class->locks_after, entry) { | 
 | 957 | 		if (entry->class == next->class) | 
 | 958 | 			return 2; | 
 | 959 | 	} | 
 | 960 |  | 
 | 961 | 	/* | 
 | 962 | 	 * Ok, all validations passed, add the new lock | 
 | 963 | 	 * to the previous lock's dependency list: | 
 | 964 | 	 */ | 
 | 965 | 	ret = add_lock_to_list(prev->class, next->class, | 
 | 966 | 			       &prev->class->locks_after, next->acquire_ip); | 
 | 967 | 	if (!ret) | 
 | 968 | 		return 0; | 
 | 969 | 	/* | 
 | 970 | 	 * Return value of 2 signals 'dependency already added', | 
 | 971 | 	 * in that case we don't have to add the backlink either. | 
 | 972 | 	 */ | 
 | 973 | 	if (ret == 2) | 
 | 974 | 		return 2; | 
 | 975 | 	ret = add_lock_to_list(next->class, prev->class, | 
 | 976 | 			       &next->class->locks_before, next->acquire_ip); | 
 | 977 |  | 
 | 978 | 	/* | 
 | 979 | 	 * Debugging printouts: | 
 | 980 | 	 */ | 
 | 981 | 	if (verbose(prev->class) || verbose(next->class)) { | 
 | 982 | 		__raw_spin_unlock(&hash_lock); | 
 | 983 | 		printk("\n new dependency: "); | 
 | 984 | 		print_lock_name(prev->class); | 
 | 985 | 		printk(" => "); | 
 | 986 | 		print_lock_name(next->class); | 
 | 987 | 		printk("\n"); | 
 | 988 | 		dump_stack(); | 
 | 989 | 		__raw_spin_lock(&hash_lock); | 
 | 990 | 	} | 
 | 991 | 	return 1; | 
 | 992 | } | 
 | 993 |  | 
 | 994 | /* | 
 | 995 |  * Add the dependency to all directly-previous locks that are 'relevant'. | 
 | 996 |  * The ones that are relevant are (in increasing distance from curr): | 
 | 997 |  * all consecutive trylock entries and the final non-trylock entry - or | 
 | 998 |  * the end of this context's lock-chain - whichever comes first. | 
 | 999 |  */ | 
 | 1000 | static int | 
 | 1001 | check_prevs_add(struct task_struct *curr, struct held_lock *next) | 
 | 1002 | { | 
 | 1003 | 	int depth = curr->lockdep_depth; | 
 | 1004 | 	struct held_lock *hlock; | 
 | 1005 |  | 
 | 1006 | 	/* | 
 | 1007 | 	 * Debugging checks. | 
 | 1008 | 	 * | 
 | 1009 | 	 * Depth must not be zero for a non-head lock: | 
 | 1010 | 	 */ | 
 | 1011 | 	if (!depth) | 
 | 1012 | 		goto out_bug; | 
 | 1013 | 	/* | 
 | 1014 | 	 * At least two relevant locks must exist for this | 
 | 1015 | 	 * to be a head: | 
 | 1016 | 	 */ | 
 | 1017 | 	if (curr->held_locks[depth].irq_context != | 
 | 1018 | 			curr->held_locks[depth-1].irq_context) | 
 | 1019 | 		goto out_bug; | 
 | 1020 |  | 
 | 1021 | 	for (;;) { | 
 | 1022 | 		hlock = curr->held_locks + depth-1; | 
 | 1023 | 		/* | 
 | 1024 | 		 * Only non-recursive-read entries get new dependencies | 
 | 1025 | 		 * added: | 
 | 1026 | 		 */ | 
 | 1027 | 		if (hlock->read != 2) { | 
 | 1028 | 			check_prev_add(curr, hlock, next); | 
 | 1029 | 			/* | 
 | 1030 | 			 * Stop after the first non-trylock entry, | 
 | 1031 | 			 * as non-trylock entries have added their | 
 | 1032 | 			 * own direct dependencies already, so this | 
 | 1033 | 			 * lock is connected to them indirectly: | 
 | 1034 | 			 */ | 
 | 1035 | 			if (!hlock->trylock) | 
 | 1036 | 				break; | 
 | 1037 | 		} | 
 | 1038 | 		depth--; | 
 | 1039 | 		/* | 
 | 1040 | 		 * End of lock-stack? | 
 | 1041 | 		 */ | 
 | 1042 | 		if (!depth) | 
 | 1043 | 			break; | 
 | 1044 | 		/* | 
 | 1045 | 		 * Stop the search if we cross into another context: | 
 | 1046 | 		 */ | 
 | 1047 | 		if (curr->held_locks[depth].irq_context != | 
 | 1048 | 				curr->held_locks[depth-1].irq_context) | 
 | 1049 | 			break; | 
 | 1050 | 	} | 
 | 1051 | 	return 1; | 
 | 1052 | out_bug: | 
 | 1053 | 	__raw_spin_unlock(&hash_lock); | 
 | 1054 | 	DEBUG_LOCKS_WARN_ON(1); | 
 | 1055 |  | 
 | 1056 | 	return 0; | 
 | 1057 | } | 
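/*
 * Worked example for the loop above: with a held stack of A (normal),
 * B (trylock), C (trylock) and a new lock D being acquired, the walk
 * goes from the top of the stack downwards and adds C -> D, then B -> D,
 * then A -> D, stopping at A because it is the first non-trylock entry;
 * anything below A is already connected to D transitively through A.
 */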
 | 1058 |  | 
 | 1059 |  | 
 | 1060 | /* | 
 | 1061 |  * Is this the address of a static object: | 
 | 1062 |  */ | 
 | 1063 | static int static_obj(void *obj) | 
 | 1064 | { | 
 | 1065 | 	unsigned long start = (unsigned long) &_stext, | 
 | 1066 | 		      end   = (unsigned long) &_end, | 
 | 1067 | 		      addr  = (unsigned long) obj; | 
 | 1068 | #ifdef CONFIG_SMP | 
 | 1069 | 	int i; | 
 | 1070 | #endif | 
 | 1071 |  | 
 | 1072 | 	/* | 
 | 1073 | 	 * static variable? | 
 | 1074 | 	 */ | 
 | 1075 | 	if ((addr >= start) && (addr < end)) | 
 | 1076 | 		return 1; | 
 | 1077 |  | 
 | 1078 | #ifdef CONFIG_SMP | 
 | 1079 | 	/* | 
 | 1080 | 	 * percpu var? | 
 | 1081 | 	 */ | 
 | 1082 | 	for_each_possible_cpu(i) { | 
 | 1083 | 		start = (unsigned long) &__per_cpu_start + per_cpu_offset(i); | 
 | 1084 | 		end   = (unsigned long) &__per_cpu_end   + per_cpu_offset(i); | 
 | 1085 |  | 
 | 1086 | 		if ((addr >= start) && (addr < end)) | 
 | 1087 | 			return 1; | 
 | 1088 | 	} | 
 | 1089 | #endif | 
 | 1090 |  | 
 | 1091 | 	/* | 
 | 1092 | 	 * module var? | 
 | 1093 | 	 */ | 
 | 1094 | 	return is_module_address(addr); | 
 | 1095 | } | 
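/*
 * Note (informal): register_lock_class() below uses this to verify that
 * lock->key points at a persistent object - either the statically
 * allocated lock itself, a per-cpu variable, or the static
 * lock_class_key that mutex_init()/spin_lock_init() passed in. A key
 * living in kmalloc()ed memory would fail this check.
 */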
 | 1096 |  | 
 | 1097 | /* | 
 | 1098 |  * To make lock name printouts unique, we calculate a per-name | 
 | 1099 |  * class->name_version generation counter: | 
 | 1100 |  */ | 
 | 1101 | static int count_matching_names(struct lock_class *new_class) | 
 | 1102 | { | 
 | 1103 | 	struct lock_class *class; | 
 | 1104 | 	int count = 0; | 
 | 1105 |  | 
 | 1106 | 	if (!new_class->name) | 
 | 1107 | 		return 0; | 
 | 1108 |  | 
 | 1109 | 	list_for_each_entry(class, &all_lock_classes, lock_entry) { | 
 | 1110 | 		if (new_class->key - new_class->subclass == class->key) | 
 | 1111 | 			return class->name_version; | 
 | 1112 | 		if (class->name && !strcmp(class->name, new_class->name)) | 
 | 1113 | 			count = max(count, class->name_version); | 
 | 1114 | 	} | 
 | 1115 |  | 
 | 1116 | 	return count + 1; | 
 | 1117 | } | 
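/*
 * For example, if two unrelated classes both carry the name "&dev->lock"
 * (same string, different keys), the second one registered gets
 * name_version 2 and print_lock_name() prints it as "&dev->lock#2",
 * keeping the two apart in reports.
 */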
 | 1118 |  | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1119 | /* | 
 | 1120 |  * Look up a lock's class in the hash-table. Returns the class if it is | 
 | 1121 |  * present, NULL otherwise - registration of new classes (and caching of | 
 | 1122 |  * the result in the lock object) is done by register_lock_class(). | 
 | 1123 |  */ | 
 | 1124 | static inline struct lock_class * | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 1125 | look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1126 | { | 
 | 1127 | 	struct lockdep_subclass_key *key; | 
 | 1128 | 	struct list_head *hash_head; | 
 | 1129 | 	struct lock_class *class; | 
 | 1130 |  | 
 | 1131 | #ifdef CONFIG_DEBUG_LOCKDEP | 
 | 1132 | 	/* | 
 | 1133 | 	 * If the architecture calls into lockdep before initializing | 
 | 1134 | 	 * the hashes then we'll warn about it later. (we cannot printk | 
 | 1135 | 	 * right now) | 
 | 1136 | 	 */ | 
 | 1137 | 	if (unlikely(!lockdep_initialized)) { | 
 | 1138 | 		lockdep_init(); | 
 | 1139 | 		lockdep_init_error = 1; | 
 | 1140 | 	} | 
 | 1141 | #endif | 
 | 1142 |  | 
 | 1143 | 	/* | 
 | 1144 | 	 * Static locks do not have their class-keys yet - for them the key | 
 | 1145 | 	 * is the lock object itself: | 
 | 1146 | 	 */ | 
 | 1147 | 	if (unlikely(!lock->key)) | 
 | 1148 | 		lock->key = (void *)lock; | 
 | 1149 |  | 
 | 1150 | 	/* | 
 | 1151 | 	 * NOTE: the class-key must be unique. For dynamic locks, a static | 
 | 1152 | 	 * lock_class_key variable is passed in through the mutex_init() | 
 | 1153 | 	 * (or spin_lock_init()) call - which acts as the key. For static | 
 | 1154 | 	 * locks we use the lock object itself as the key. | 
 | 1155 | 	 */ | 
| Alexey Dobriyan | 3dc3099 | 2006-10-11 01:22:06 -0700 | [diff] [blame] | 1156 | 	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class)); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1157 |  | 
 | 1158 | 	key = lock->key->subkeys + subclass; | 
 | 1159 |  | 
 | 1160 | 	hash_head = classhashentry(key); | 
 | 1161 |  | 
 | 1162 | 	/* | 
 | 1163 | 	 * We can walk the hash lockfree, because the hash only | 
 | 1164 | 	 * grows, and we are careful when adding entries to the end: | 
 | 1165 | 	 */ | 
 | 1166 | 	list_for_each_entry(class, hash_head, hash_entry) | 
 | 1167 | 		if (class->key == key) | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 1168 | 			return class; | 
 | 1169 |  | 
 | 1170 | 	return NULL; | 
 | 1171 | } | 
 | 1172 |  | 
 | 1173 | /* | 
 | 1174 |  * Register a lock's class in the hash-table, if the class is not present | 
 | 1175 |  * yet. Otherwise we look it up. We cache the result in the lock object | 
 | 1176 |  * itself, so actual lookup of the hash should be once per lock object. | 
 | 1177 |  */ | 
 | 1178 | static inline struct lock_class * | 
| Peter Zijlstra | 4dfbb9d | 2006-10-11 01:45:14 -0400 | [diff] [blame] | 1179 | register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 1180 | { | 
 | 1181 | 	struct lockdep_subclass_key *key; | 
 | 1182 | 	struct list_head *hash_head; | 
 | 1183 | 	struct lock_class *class; | 
 | 1184 |  | 
 | 1185 | 	class = look_up_lock_class(lock, subclass); | 
 | 1186 | 	if (likely(class)) | 
 | 1187 | 		return class; | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1188 |  | 
 | 1189 | 	/* | 
 | 1190 | 	 * Debug-check: all keys must be persistent! | 
 | 1191 |  	 */ | 
 | 1192 | 	if (!static_obj(lock->key)) { | 
 | 1193 | 		debug_locks_off(); | 
 | 1194 | 		printk("INFO: trying to register non-static key.\n"); | 
 | 1195 | 		printk("the code is fine but needs lockdep annotation.\n"); | 
 | 1196 | 		printk("turning off the locking correctness validator.\n"); | 
 | 1197 | 		dump_stack(); | 
 | 1198 |  | 
 | 1199 | 		return NULL; | 
 | 1200 | 	} | 
 | 1201 |  | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 1202 | 	key = lock->key->subkeys + subclass; | 
 | 1203 | 	hash_head = classhashentry(key); | 
 | 1204 |  | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1205 | 	__raw_spin_lock(&hash_lock); | 
 | 1206 | 	/* | 
 | 1207 | 	 * We have to do the hash-walk again, to avoid races | 
 | 1208 | 	 * with another CPU: | 
 | 1209 | 	 */ | 
 | 1210 | 	list_for_each_entry(class, hash_head, hash_entry) | 
 | 1211 | 		if (class->key == key) | 
 | 1212 | 			goto out_unlock_set; | 
 | 1213 | 	/* | 
 | 1214 | 	 * Allocate a new key from the static array, and add it to | 
 | 1215 | 	 * the hash: | 
 | 1216 | 	 */ | 
 | 1217 | 	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { | 
 | 1218 | 		__raw_spin_unlock(&hash_lock); | 
 | 1219 | 		debug_locks_off(); | 
 | 1220 | 		printk("BUG: MAX_LOCKDEP_KEYS too low!\n"); | 
 | 1221 | 		printk("turning off the locking correctness validator.\n"); | 
 | 1222 | 		return NULL; | 
 | 1223 | 	} | 
 | 1224 | 	class = lock_classes + nr_lock_classes++; | 
 | 1225 | 	debug_atomic_inc(&nr_unused_locks); | 
 | 1226 | 	class->key = key; | 
 | 1227 | 	class->name = lock->name; | 
 | 1228 | 	class->subclass = subclass; | 
 | 1229 | 	INIT_LIST_HEAD(&class->lock_entry); | 
 | 1230 | 	INIT_LIST_HEAD(&class->locks_before); | 
 | 1231 | 	INIT_LIST_HEAD(&class->locks_after); | 
 | 1232 | 	class->name_version = count_matching_names(class); | 
 | 1233 | 	/* | 
 | 1234 | 	 * We use RCU's safe list-add method to make | 
 | 1235 | 	 * parallel walking of the hash-list safe: | 
 | 1236 | 	 */ | 
 | 1237 | 	list_add_tail_rcu(&class->hash_entry, hash_head); | 
 | 1238 |  | 
 | 1239 | 	if (verbose(class)) { | 
 | 1240 | 		__raw_spin_unlock(&hash_lock); | 
 | 1241 | 		printk("\nnew class %p: %s", class->key, class->name); | 
 | 1242 | 		if (class->name_version > 1) | 
 | 1243 | 			printk("#%d", class->name_version); | 
 | 1244 | 		printk("\n"); | 
 | 1245 | 		dump_stack(); | 
 | 1246 | 		__raw_spin_lock(&hash_lock); | 
 | 1247 | 	} | 
 | 1248 | out_unlock_set: | 
 | 1249 | 	__raw_spin_unlock(&hash_lock); | 
 | 1250 |  | 
| Peter Zijlstra | 4dfbb9d | 2006-10-11 01:45:14 -0400 | [diff] [blame] | 1251 | 	if (!subclass || force) | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 1252 | 		lock->class_cache = class; | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1253 |  | 
 | 1254 | 	DEBUG_LOCKS_WARN_ON(class->subclass != subclass); | 
 | 1255 |  | 
 | 1256 | 	return class; | 
 | 1257 | } | 
 | 1258 |  | 
 | 1259 | /* | 
 | 1260 |  * Look up a dependency chain. If the key is not present yet then | 
 | 1261 |  * add it and return 1 - in this case the new dependency chain gets | 
 | 1262 |  * validated. If the key is already hashed, return 0. | 
 | 1263 |  */ | 
 | 1264 | static inline int lookup_chain_cache(u64 chain_key) | 
 | 1265 | { | 
 | 1266 | 	struct list_head *hash_head = chainhashentry(chain_key); | 
 | 1267 | 	struct lock_chain *chain; | 
 | 1268 |  | 
 | 1269 | 	DEBUG_LOCKS_WARN_ON(!irqs_disabled()); | 
 | 1270 | 	/* | 
 | 1271 | 	 * We can walk it lock-free, because entries only get added | 
 | 1272 | 	 * to the hash: | 
 | 1273 | 	 */ | 
 | 1274 | 	list_for_each_entry(chain, hash_head, entry) { | 
 | 1275 | 		if (chain->chain_key == chain_key) { | 
 | 1276 | cache_hit: | 
 | 1277 | 			debug_atomic_inc(&chain_lookup_hits); | 
 | 1278 | 			/* | 
 | 1279 | 			 * In the debugging case, force redundant checking | 
 | 1280 | 			 * by returning 1: | 
 | 1281 | 			 */ | 
 | 1282 | #ifdef CONFIG_DEBUG_LOCKDEP | 
 | 1283 | 			__raw_spin_lock(&hash_lock); | 
 | 1284 | 			return 1; | 
 | 1285 | #endif | 
 | 1286 | 			return 0; | 
 | 1287 | 		} | 
 | 1288 | 	} | 
 | 1289 | 	/* | 
 | 1290 | 	 * Allocate a new chain entry from the static array, and add | 
 | 1291 | 	 * it to the hash: | 
 | 1292 | 	 */ | 
 | 1293 | 	__raw_spin_lock(&hash_lock); | 
 | 1294 | 	/* | 
 | 1295 | 	 * We have to walk the chain again locked - to avoid duplicates: | 
 | 1296 | 	 */ | 
 | 1297 | 	list_for_each_entry(chain, hash_head, entry) { | 
 | 1298 | 		if (chain->chain_key == chain_key) { | 
 | 1299 | 			__raw_spin_unlock(&hash_lock); | 
 | 1300 | 			goto cache_hit; | 
 | 1301 | 		} | 
 | 1302 | 	} | 
 | 1303 | 	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { | 
 | 1304 | 		__raw_spin_unlock(&hash_lock); | 
 | 1305 | 		debug_locks_off(); | 
 | 1306 | 		printk("BUG: MAX_LOCKDEP_CHAINS too low!\n"); | 
 | 1307 | 		printk("turning off the locking correctness validator.\n"); | 
 | 1308 | 		return 0; | 
 | 1309 | 	} | 
 | 1310 | 	chain = lock_chains + nr_lock_chains++; | 
 | 1311 | 	chain->chain_key = chain_key; | 
 | 1312 | 	list_add_tail_rcu(&chain->entry, hash_head); | 
 | 1313 | 	debug_atomic_inc(&chain_lookup_misses); | 
 | 1314 | #ifdef CONFIG_TRACE_IRQFLAGS | 
 | 1315 | 	if (current->hardirq_context) | 
 | 1316 | 		nr_hardirq_chains++; | 
 | 1317 | 	else { | 
 | 1318 | 		if (current->softirq_context) | 
 | 1319 | 			nr_softirq_chains++; | 
 | 1320 | 		else | 
 | 1321 | 			nr_process_chains++; | 
 | 1322 | 	} | 
 | 1323 | #else | 
 | 1324 | 	nr_process_chains++; | 
 | 1325 | #endif | 
 | 1326 |  | 
 | 1327 | 	return 1; | 
 | 1328 | } | 
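/*
 * Effect of the chain cache (informal): the first time a task takes a
 * given sequence of lock classes the lookup misses, the chain is added
 * under hash_lock and the caller runs the full dependency validation;
 * later occurrences of the same sequence hit the cache, return 0 and
 * skip the expensive graph checks on hot paths.
 */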
 | 1329 |  | 
 | 1330 | /* | 
 | 1331 |  * We are building curr_chain_key incrementally, so double-check | 
 | 1332 |  * it from scratch, to make sure that it's done correctly: | 
 | 1333 |  */ | 
 | 1334 | static void check_chain_key(struct task_struct *curr) | 
 | 1335 | { | 
 | 1336 | #ifdef CONFIG_DEBUG_LOCKDEP | 
 | 1337 | 	struct held_lock *hlock, *prev_hlock = NULL; | 
 | 1338 | 	unsigned int i, id; | 
 | 1339 | 	u64 chain_key = 0; | 
 | 1340 |  | 
 | 1341 | 	for (i = 0; i < curr->lockdep_depth; i++) { | 
 | 1342 | 		hlock = curr->held_locks + i; | 
 | 1343 | 		if (chain_key != hlock->prev_chain_key) { | 
 | 1344 | 			debug_locks_off(); | 
 | 1345 | 			printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n", | 
 | 1346 | 				curr->lockdep_depth, i, | 
 | 1347 | 				(unsigned long long)chain_key, | 
 | 1348 | 				(unsigned long long)hlock->prev_chain_key); | 
 | 1349 | 			WARN_ON(1); | 
 | 1350 | 			return; | 
 | 1351 | 		} | 
 | 1352 | 		id = hlock->class - lock_classes; | 
 | 1353 | 		DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS); | 
 | 1354 | 		if (prev_hlock && (prev_hlock->irq_context != | 
 | 1355 | 							hlock->irq_context)) | 
 | 1356 | 			chain_key = 0; | 
 | 1357 | 		chain_key = iterate_chain_key(chain_key, id); | 
 | 1358 | 		prev_hlock = hlock; | 
 | 1359 | 	} | 
 | 1360 | 	if (chain_key != curr->curr_chain_key) { | 
 | 1361 | 		debug_locks_off(); | 
 | 1362 | 		printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n", | 
 | 1363 | 			curr->lockdep_depth, i, | 
 | 1364 | 			(unsigned long long)chain_key, | 
 | 1365 | 			(unsigned long long)curr->curr_chain_key); | 
 | 1366 | 		WARN_ON(1); | 
 | 1367 | 	} | 
 | 1368 | #endif | 
 | 1369 | } | 
 | 1370 |  | 
 | 1371 | #ifdef CONFIG_TRACE_IRQFLAGS | 
 | 1372 |  | 
 | 1373 | /* | 
 | 1374 |  * print irq inversion bug: | 
 | 1375 |  */ | 
 | 1376 | static int | 
 | 1377 | print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, | 
 | 1378 | 			struct held_lock *this, int forwards, | 
 | 1379 | 			const char *irqclass) | 
 | 1380 | { | 
 | 1381 | 	__raw_spin_unlock(&hash_lock); | 
 | 1382 | 	debug_locks_off(); | 
 | 1383 | 	if (debug_locks_silent) | 
 | 1384 | 		return 0; | 
 | 1385 |  | 
 | 1386 | 	printk("\n=========================================================\n"); | 
 | 1387 | 	printk(  "[ INFO: possible irq lock inversion dependency detected ]\n"); | 
| Dave Jones | 99de055 | 2006-09-29 02:00:10 -0700 | [diff] [blame] | 1388 | 	print_kernel_version(); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1389 | 	printk(  "---------------------------------------------------------\n"); | 
 | 1390 | 	printk("%s/%d just changed the state of lock:\n", | 
 | 1391 | 		curr->comm, curr->pid); | 
 | 1392 | 	print_lock(this); | 
 | 1393 | 	if (forwards) | 
 | 1394 | 		printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass); | 
 | 1395 | 	else | 
 | 1396 | 		printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass); | 
 | 1397 | 	print_lock_name(other); | 
 | 1398 | 	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); | 
 | 1399 |  | 
 | 1400 | 	printk("\nother info that might help us debug this:\n"); | 
 | 1401 | 	lockdep_print_held_locks(curr); | 
 | 1402 |  | 
 | 1403 | 	printk("\nthe first lock's dependencies:\n"); | 
 | 1404 | 	print_lock_dependencies(this->class, 0); | 
 | 1405 |  | 
 | 1406 | 	printk("\nthe second lock's dependencies:\n"); | 
 | 1407 | 	print_lock_dependencies(other, 0); | 
 | 1408 |  | 
 | 1409 | 	printk("\nstack backtrace:\n"); | 
 | 1410 | 	dump_stack(); | 
 | 1411 |  | 
 | 1412 | 	return 0; | 
 | 1413 | } | 
 | 1414 |  | 
 | 1415 | /* | 
 | 1416 |  * Prove that in the forwards-direction subgraph starting at <this> | 
 | 1417 |  * there is no lock matching <mask>: | 
 | 1418 |  */ | 
 | 1419 | static int | 
 | 1420 | check_usage_forwards(struct task_struct *curr, struct held_lock *this, | 
 | 1421 | 		     enum lock_usage_bit bit, const char *irqclass) | 
 | 1422 | { | 
 | 1423 | 	int ret; | 
 | 1424 |  | 
 | 1425 | 	find_usage_bit = bit; | 
 | 1426 | 	/* fills in <forwards_match> */ | 
 | 1427 | 	ret = find_usage_forwards(this->class, 0); | 
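 |  | 	/* | 
 |  | 	 * (ret == 0 propagates an internal error, ret == 1 means no | 
 |  | 	 * matching lock exists in the forwards subgraph; otherwise | 
 |  | 	 * <forwards_match> was filled in and we report the inversion.) | 
 |  | 	 */ | 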
 | 1428 | 	if (!ret || ret == 1) | 
 | 1429 | 		return ret; | 
 | 1430 |  | 
 | 1431 | 	return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass); | 
 | 1432 | } | 
 | 1433 |  | 
 | 1434 | /* | 
 | 1435 |  * Prove that in the backwards-direction subgraph starting at <this> | 
 | 1436 |  * there is no lock matching <mask>: | 
 | 1437 |  */ | 
 | 1438 | static int | 
 | 1439 | check_usage_backwards(struct task_struct *curr, struct held_lock *this, | 
 | 1440 | 		      enum lock_usage_bit bit, const char *irqclass) | 
 | 1441 | { | 
 | 1442 | 	int ret; | 
 | 1443 |  | 
 | 1444 | 	find_usage_bit = bit; | 
 | 1445 | 	/* fills in <backwards_match> */ | 
 | 1446 | 	ret = find_usage_backwards(this->class, 0); | 
 | 1447 | 	if (!ret || ret == 1) | 
 | 1448 | 		return ret; | 
 | 1449 |  | 
 | 1450 | 	return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass); | 
 | 1451 | } | 
 | 1452 |  | 
 | 1453 | static inline void print_irqtrace_events(struct task_struct *curr) | 
 | 1454 | { | 
 | 1455 | 	printk("irq event stamp: %u\n", curr->irq_events); | 
 | 1456 | 	printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event); | 
 | 1457 | 	print_ip_sym(curr->hardirq_enable_ip); | 
 | 1458 | 	printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event); | 
 | 1459 | 	print_ip_sym(curr->hardirq_disable_ip); | 
 | 1460 | 	printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event); | 
 | 1461 | 	print_ip_sym(curr->softirq_enable_ip); | 
 | 1462 | 	printk("softirqs last disabled at (%u): ", curr->softirq_disable_event); | 
 | 1463 | 	print_ip_sym(curr->softirq_disable_ip); | 
 | 1464 | } | 
 | 1465 |  | 
 | 1466 | #else | 
 | 1467 | static inline void print_irqtrace_events(struct task_struct *curr) | 
 | 1468 | { | 
 | 1469 | } | 
 | 1470 | #endif | 
 | 1471 |  | 
 | 1472 | static int | 
 | 1473 | print_usage_bug(struct task_struct *curr, struct held_lock *this, | 
 | 1474 | 		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) | 
 | 1475 | { | 
 | 1476 | 	__raw_spin_unlock(&hash_lock); | 
 | 1477 | 	debug_locks_off(); | 
 | 1478 | 	if (debug_locks_silent) | 
 | 1479 | 		return 0; | 
 | 1480 |  | 
 | 1481 | 	printk("\n=================================\n"); | 
 | 1482 | 	printk(  "[ INFO: inconsistent lock state ]\n"); | 
| Dave Jones | 99de055 | 2006-09-29 02:00:10 -0700 | [diff] [blame] | 1483 | 	print_kernel_version(); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1484 | 	printk(  "---------------------------------\n"); | 
 | 1485 |  | 
 | 1486 | 	printk("inconsistent {%s} -> {%s} usage.\n", | 
 | 1487 | 		usage_str[prev_bit], usage_str[new_bit]); | 
 | 1488 |  | 
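 |  | 	/* | 
 |  | 	 * The bracketed state below reads: HC/SC = hardirq/softirq | 
 |  | 	 * context ([n] is the nesting count), HE/SE = hardirqs/softirqs | 
 |  | 	 * enabled: | 
 |  | 	 */ | 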
 | 1489 | 	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", | 
 | 1490 | 		curr->comm, curr->pid, | 
 | 1491 | 		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, | 
 | 1492 | 		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, | 
 | 1493 | 		trace_hardirqs_enabled(curr), | 
 | 1494 | 		trace_softirqs_enabled(curr)); | 
 | 1495 | 	print_lock(this); | 
 | 1496 |  | 
 | 1497 | 	printk("{%s} state was registered at:\n", usage_str[prev_bit]); | 
 | 1498 | 	print_stack_trace(this->class->usage_traces + prev_bit, 1); | 
 | 1499 |  | 
 | 1500 | 	print_irqtrace_events(curr); | 
 | 1501 | 	printk("\nother info that might help us debug this:\n"); | 
 | 1502 | 	lockdep_print_held_locks(curr); | 
 | 1503 |  | 
 | 1504 | 	printk("\nstack backtrace:\n"); | 
 | 1505 | 	dump_stack(); | 
 | 1506 |  | 
 | 1507 | 	return 0; | 
 | 1508 | } | 
 | 1509 |  | 
 | 1510 | /* | 
 | 1511 |  * Print out an error if an invalid bit is set: | 
 | 1512 |  */ | 
 | 1513 | static inline int | 
 | 1514 | valid_state(struct task_struct *curr, struct held_lock *this, | 
 | 1515 | 	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) | 
 | 1516 | { | 
 | 1517 | 	if (unlikely(this->class->usage_mask & (1 << bad_bit))) | 
 | 1518 | 		return print_usage_bug(curr, this, bad_bit, new_bit); | 
 | 1519 | 	return 1; | 
 | 1520 | } | 
 | 1521 |  | 
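 |  | /* | 
 |  |  * If set, the stricter read-lock variants of the irq-safety checks | 
 |  |  * in mark_lock() below (the #if STRICT_READ_CHECKS blocks) are | 
 |  |  * enabled as well: | 
 |  |  */ | 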
 | 1522 | #define STRICT_READ_CHECKS	1 | 
 | 1523 |  | 
 | 1524 | /* | 
 | 1525 |  * Mark a lock with a usage bit, and validate the state transition: | 
 | 1526 |  */ | 
 | 1527 | static int mark_lock(struct task_struct *curr, struct held_lock *this, | 
 | 1528 | 		     enum lock_usage_bit new_bit, unsigned long ip) | 
 | 1529 | { | 
 | 1530 | 	unsigned int new_mask = 1 << new_bit, ret = 1; | 
 | 1531 |  | 
 | 1532 | 	/* | 
 | 1533 | 	 * If already set then do not dirty the cacheline, | 
 | 1534 | 	 * nor do any checks: | 
 | 1535 | 	 */ | 
 | 1536 | 	if (likely(this->class->usage_mask & new_mask)) | 
 | 1537 | 		return 1; | 
 | 1538 |  | 
 | 1539 | 	__raw_spin_lock(&hash_lock); | 
 | 1540 | 	/* | 
 | 1541 | 	 * Make sure we didn't race: | 
 | 1542 | 	 */ | 
 | 1543 | 	if (unlikely(this->class->usage_mask & new_mask)) { | 
 | 1544 | 		__raw_spin_unlock(&hash_lock); | 
 | 1545 | 		return 1; | 
 | 1546 | 	} | 
 | 1547 |  | 
 | 1548 | 	this->class->usage_mask |= new_mask; | 
 | 1549 |  | 
 | 1550 | #ifdef CONFIG_TRACE_IRQFLAGS | 
 | 1551 | 	if (new_bit == LOCK_ENABLED_HARDIRQS || | 
 | 1552 | 			new_bit == LOCK_ENABLED_HARDIRQS_READ) | 
 | 1553 | 		ip = curr->hardirq_enable_ip; | 
 | 1554 | 	else if (new_bit == LOCK_ENABLED_SOFTIRQS || | 
 | 1555 | 			new_bit == LOCK_ENABLED_SOFTIRQS_READ) | 
 | 1556 | 		ip = curr->softirq_enable_ip; | 
 | 1557 | #endif | 
 | 1558 | 	if (!save_trace(this->class->usage_traces + new_bit)) | 
 | 1559 | 		return 0; | 
 | 1560 |  | 
 | 1561 | 	switch (new_bit) { | 
 | 1562 | #ifdef CONFIG_TRACE_IRQFLAGS | 
 | 1563 | 	case LOCK_USED_IN_HARDIRQ: | 
 | 1564 | 		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS)) | 
 | 1565 | 			return 0; | 
 | 1566 | 		if (!valid_state(curr, this, new_bit, | 
 | 1567 | 				 LOCK_ENABLED_HARDIRQS_READ)) | 
 | 1568 | 			return 0; | 
 | 1569 | 		/* | 
 | 1570 | 		 * just marked it hardirq-safe, check that this lock | 
 | 1571 | 		 * took no hardirq-unsafe lock in the past: | 
 | 1572 | 		 */ | 
 | 1573 | 		if (!check_usage_forwards(curr, this, | 
 | 1574 | 					  LOCK_ENABLED_HARDIRQS, "hard")) | 
 | 1575 | 			return 0; | 
 | 1576 | #if STRICT_READ_CHECKS | 
 | 1577 | 		/* | 
 | 1578 | 		 * just marked it hardirq-safe, check that this lock | 
 | 1579 | 		 * took no hardirq-unsafe-read lock in the past: | 
 | 1580 | 		 */ | 
 | 1581 | 		if (!check_usage_forwards(curr, this, | 
 | 1582 | 				LOCK_ENABLED_HARDIRQS_READ, "hard-read")) | 
 | 1583 | 			return 0; | 
 | 1584 | #endif | 
 | 1585 | 		if (hardirq_verbose(this->class)) | 
 | 1586 | 			ret = 2; | 
 | 1587 | 		break; | 
 | 1588 | 	case LOCK_USED_IN_SOFTIRQ: | 
 | 1589 | 		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS)) | 
 | 1590 | 			return 0; | 
 | 1591 | 		if (!valid_state(curr, this, new_bit, | 
 | 1592 | 				 LOCK_ENABLED_SOFTIRQS_READ)) | 
 | 1593 | 			return 0; | 
 | 1594 | 		/* | 
 | 1595 | 		 * just marked it softirq-safe, check that this lock | 
 | 1596 | 		 * took no softirq-unsafe lock in the past: | 
 | 1597 | 		 */ | 
 | 1598 | 		if (!check_usage_forwards(curr, this, | 
 | 1599 | 					  LOCK_ENABLED_SOFTIRQS, "soft")) | 
 | 1600 | 			return 0; | 
 | 1601 | #if STRICT_READ_CHECKS | 
 | 1602 | 		/* | 
 | 1603 | 		 * just marked it softirq-safe, check that this lock | 
 | 1604 | 		 * took no softirq-unsafe-read lock in the past: | 
 | 1605 | 		 */ | 
 | 1606 | 		if (!check_usage_forwards(curr, this, | 
 | 1607 | 				LOCK_ENABLED_SOFTIRQS_READ, "soft-read")) | 
 | 1608 | 			return 0; | 
 | 1609 | #endif | 
 | 1610 | 		if (softirq_verbose(this->class)) | 
 | 1611 | 			ret = 2; | 
 | 1612 | 		break; | 
 | 1613 | 	case LOCK_USED_IN_HARDIRQ_READ: | 
 | 1614 | 		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS)) | 
 | 1615 | 			return 0; | 
 | 1616 | 		/* | 
 | 1617 | 		 * just marked it hardirq-read-safe, check that this lock | 
 | 1618 | 		 * took no hardirq-unsafe lock in the past: | 
 | 1619 | 		 */ | 
 | 1620 | 		if (!check_usage_forwards(curr, this, | 
 | 1621 | 					  LOCK_ENABLED_HARDIRQS, "hard")) | 
 | 1622 | 			return 0; | 
 | 1623 | 		if (hardirq_verbose(this->class)) | 
 | 1624 | 			ret = 2; | 
 | 1625 | 		break; | 
 | 1626 | 	case LOCK_USED_IN_SOFTIRQ_READ: | 
 | 1627 | 		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS)) | 
 | 1628 | 			return 0; | 
 | 1629 | 		/* | 
 | 1630 | 		 * just marked it softirq-read-safe, check that this lock | 
 | 1631 | 		 * took no softirq-unsafe lock in the past: | 
 | 1632 | 		 */ | 
 | 1633 | 		if (!check_usage_forwards(curr, this, | 
 | 1634 | 					  LOCK_ENABLED_SOFTIRQS, "soft")) | 
 | 1635 | 			return 0; | 
 | 1636 | 		if (softirq_verbose(this->class)) | 
 | 1637 | 			ret = 2; | 
 | 1638 | 		break; | 
 | 1639 | 	case LOCK_ENABLED_HARDIRQS: | 
 | 1640 | 		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ)) | 
 | 1641 | 			return 0; | 
 | 1642 | 		if (!valid_state(curr, this, new_bit, | 
 | 1643 | 				 LOCK_USED_IN_HARDIRQ_READ)) | 
 | 1644 | 			return 0; | 
 | 1645 | 		/* | 
 | 1646 | 		 * just marked it hardirq-unsafe, check that no hardirq-safe | 
 | 1647 | 		 * lock in the system ever took it in the past: | 
 | 1648 | 		 */ | 
 | 1649 | 		if (!check_usage_backwards(curr, this, | 
 | 1650 | 					   LOCK_USED_IN_HARDIRQ, "hard")) | 
 | 1651 | 			return 0; | 
 | 1652 | #if STRICT_READ_CHECKS | 
 | 1653 | 		/* | 
 | 1654 | 		 * just marked it hardirq-unsafe, check that no | 
 | 1655 | 		 * hardirq-safe-read lock in the system ever took | 
 | 1656 | 		 * it in the past: | 
 | 1657 | 		 */ | 
 | 1658 | 		if (!check_usage_backwards(curr, this, | 
 | 1659 | 				   LOCK_USED_IN_HARDIRQ_READ, "hard-read")) | 
 | 1660 | 			return 0; | 
 | 1661 | #endif | 
 | 1662 | 		if (hardirq_verbose(this->class)) | 
 | 1663 | 			ret = 2; | 
 | 1664 | 		break; | 
 | 1665 | 	case LOCK_ENABLED_SOFTIRQS: | 
 | 1666 | 		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ)) | 
 | 1667 | 			return 0; | 
 | 1668 | 		if (!valid_state(curr, this, new_bit, | 
 | 1669 | 				 LOCK_USED_IN_SOFTIRQ_READ)) | 
 | 1670 | 			return 0; | 
 | 1671 | 		/* | 
 | 1672 | 		 * just marked it softirq-unsafe, check that no softirq-safe | 
 | 1673 | 		 * lock in the system ever took it in the past: | 
 | 1674 | 		 */ | 
 | 1675 | 		if (!check_usage_backwards(curr, this, | 
 | 1676 | 					   LOCK_USED_IN_SOFTIRQ, "soft")) | 
 | 1677 | 			return 0; | 
 | 1678 | #if STRICT_READ_CHECKS | 
 | 1679 | 		/* | 
 | 1680 | 		 * just marked it softirq-unsafe, check that no | 
 | 1681 | 		 * softirq-safe-read lock in the system ever took | 
 | 1682 | 		 * it in the past: | 
 | 1683 | 		 */ | 
 | 1684 | 		if (!check_usage_backwards(curr, this, | 
 | 1685 | 				   LOCK_USED_IN_SOFTIRQ_READ, "soft-read")) | 
 | 1686 | 			return 0; | 
 | 1687 | #endif | 
 | 1688 | 		if (softirq_verbose(this->class)) | 
 | 1689 | 			ret = 2; | 
 | 1690 | 		break; | 
 | 1691 | 	case LOCK_ENABLED_HARDIRQS_READ: | 
 | 1692 | 		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ)) | 
 | 1693 | 			return 0; | 
 | 1694 | #if STRICT_READ_CHECKS | 
 | 1695 | 		/* | 
 | 1696 | 		 * just marked it hardirq-read-unsafe, check that no | 
 | 1697 | 		 * hardirq-safe lock in the system ever took it in the past: | 
 | 1698 | 		 */ | 
 | 1699 | 		if (!check_usage_backwards(curr, this, | 
 | 1700 | 					   LOCK_USED_IN_HARDIRQ, "hard")) | 
 | 1701 | 			return 0; | 
 | 1702 | #endif | 
 | 1703 | 		if (hardirq_verbose(this->class)) | 
 | 1704 | 			ret = 2; | 
 | 1705 | 		break; | 
 | 1706 | 	case LOCK_ENABLED_SOFTIRQS_READ: | 
 | 1707 | 		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ)) | 
 | 1708 | 			return 0; | 
 | 1709 | #if STRICT_READ_CHECKS | 
 | 1710 | 		/* | 
 | 1711 | 		 * just marked it softirq-read-unsafe, check that no | 
 | 1712 | 		 * softirq-safe lock in the system ever took it in the past: | 
 | 1713 | 		 */ | 
 | 1714 | 		if (!check_usage_backwards(curr, this, | 
 | 1715 | 					   LOCK_USED_IN_SOFTIRQ, "soft")) | 
 | 1716 | 			return 0; | 
 | 1717 | #endif | 
 | 1718 | 		if (softirq_verbose(this->class)) | 
 | 1719 | 			ret = 2; | 
 | 1720 | 		break; | 
 | 1721 | #endif | 
 | 1722 | 	case LOCK_USED: | 
 | 1723 | 		/* | 
 | 1724 | 		 * Add it to the global list of classes: | 
 | 1725 | 		 */ | 
 | 1726 | 		list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes); | 
 | 1727 | 		debug_atomic_dec(&nr_unused_locks); | 
 | 1728 | 		break; | 
 | 1729 | 	default: | 
 | 1730 | 		debug_locks_off(); | 
 | 1731 | 		WARN_ON(1); | 
 | 1732 | 		return 0; | 
 | 1733 | 	} | 
 | 1734 |  | 
 | 1735 | 	__raw_spin_unlock(&hash_lock); | 
 | 1736 |  | 
 | 1737 | 	/* | 
 | 1738 | 	 * We must printk outside of the hash_lock: | 
 | 1739 | 	 */ | 
 | 1740 | 	if (ret == 2) { | 
 | 1741 | 		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]); | 
 | 1742 | 		print_lock(this); | 
 | 1743 | 		print_irqtrace_events(curr); | 
 | 1744 | 		dump_stack(); | 
 | 1745 | 	} | 
 | 1746 |  | 
 | 1747 | 	return ret; | 
 | 1748 | } | 
 | 1749 |  | 
 | 1750 | #ifdef CONFIG_TRACE_IRQFLAGS | 
 | 1751 | /* | 
 | 1752 |  * Mark all held locks with a usage bit: | 
 | 1753 |  */ | 
 | 1754 | static int | 
 | 1755 | mark_held_locks(struct task_struct *curr, int hardirq, unsigned long ip) | 
 | 1756 | { | 
 | 1757 | 	enum lock_usage_bit usage_bit; | 
 | 1758 | 	struct held_lock *hlock; | 
 | 1759 | 	int i; | 
 | 1760 |  | 
 | 1761 | 	for (i = 0; i < curr->lockdep_depth; i++) { | 
 | 1762 | 		hlock = curr->held_locks + i; | 
 | 1763 |  | 
 | 1764 | 		if (hardirq) { | 
 | 1765 | 			if (hlock->read) | 
 | 1766 | 				usage_bit = LOCK_ENABLED_HARDIRQS_READ; | 
 | 1767 | 			else | 
 | 1768 | 				usage_bit = LOCK_ENABLED_HARDIRQS; | 
 | 1769 | 		} else { | 
 | 1770 | 			if (hlock->read) | 
 | 1771 | 				usage_bit = LOCK_ENABLED_SOFTIRQS_READ; | 
 | 1772 | 			else | 
 | 1773 | 				usage_bit = LOCK_ENABLED_SOFTIRQS; | 
 | 1774 | 		} | 
 | 1775 | 		if (!mark_lock(curr, hlock, usage_bit, ip)) | 
 | 1776 | 			return 0; | 
 | 1777 | 	} | 
 | 1778 |  | 
 | 1779 | 	return 1; | 
 | 1780 | } | 
 | 1781 |  | 
 | 1782 | /* | 
 | 1783 |  * Debugging helper: via this flag we know that we are in | 
 | 1784 |  * 'early bootup code', and will warn about any invalid irqs-on event: | 
 | 1785 |  */ | 
 | 1786 | static int early_boot_irqs_enabled; | 
 | 1787 |  | 
 | 1788 | void early_boot_irqs_off(void) | 
 | 1789 | { | 
 | 1790 | 	early_boot_irqs_enabled = 0; | 
 | 1791 | } | 
 | 1792 |  | 
 | 1793 | void early_boot_irqs_on(void) | 
 | 1794 | { | 
 | 1795 | 	early_boot_irqs_enabled = 1; | 
 | 1796 | } | 
 | 1797 |  | 
 | 1798 | /* | 
 | 1799 |  * Hardirqs will be enabled: | 
 | 1800 |  */ | 
 | 1801 | void trace_hardirqs_on(void) | 
 | 1802 | { | 
 | 1803 | 	struct task_struct *curr = current; | 
 | 1804 | 	unsigned long ip; | 
 | 1805 |  | 
 | 1806 | 	if (unlikely(!debug_locks || current->lockdep_recursion)) | 
 | 1807 | 		return; | 
 | 1808 |  | 
 | 1809 | 	if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled))) | 
 | 1810 | 		return; | 
 | 1811 |  | 
 | 1812 | 	if (unlikely(curr->hardirqs_enabled)) { | 
 | 1813 | 		debug_atomic_inc(&redundant_hardirqs_on); | 
 | 1814 | 		return; | 
 | 1815 | 	} | 
 | 1816 | 	/* we'll do an OFF -> ON transition: */ | 
 | 1817 | 	curr->hardirqs_enabled = 1; | 
 | 1818 | 	ip = (unsigned long) __builtin_return_address(0); | 
 | 1819 |  | 
 | 1820 | 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 
 | 1821 | 		return; | 
 | 1822 | 	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) | 
 | 1823 | 		return; | 
 | 1824 | 	/* | 
 | 1825 | 	 * We are going to turn hardirqs on, so set the | 
 | 1826 | 	 * usage bit for all held locks: | 
 | 1827 | 	 */ | 
 | 1828 | 	if (!mark_held_locks(curr, 1, ip)) | 
 | 1829 | 		return; | 
 | 1830 | 	/* | 
 | 1831 | 	 * If we have softirqs enabled, then set the usage | 
 | 1832 | 	 * bit for all held locks. (disabled hardirqs prevented | 
 | 1833 | 	 * this bit from being set before) | 
 | 1834 | 	 */ | 
 | 1835 | 	if (curr->softirqs_enabled) | 
 | 1836 | 		if (!mark_held_locks(curr, 0, ip)) | 
 | 1837 | 			return; | 
 | 1838 |  | 
 | 1839 | 	curr->hardirq_enable_ip = ip; | 
 | 1840 | 	curr->hardirq_enable_event = ++curr->irq_events; | 
 | 1841 | 	debug_atomic_inc(&hardirqs_on_events); | 
 | 1842 | } | 
 | 1843 |  | 
 | 1844 | EXPORT_SYMBOL(trace_hardirqs_on); | 
 | 1845 |  | 
 | 1846 | /* | 
 | 1847 |  * Hardirqs were disabled: | 
 | 1848 |  */ | 
 | 1849 | void trace_hardirqs_off(void) | 
 | 1850 | { | 
 | 1851 | 	struct task_struct *curr = current; | 
 | 1852 |  | 
 | 1853 | 	if (unlikely(!debug_locks || current->lockdep_recursion)) | 
 | 1854 | 		return; | 
 | 1855 |  | 
 | 1856 | 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 
 | 1857 | 		return; | 
 | 1858 |  | 
 | 1859 | 	if (curr->hardirqs_enabled) { | 
 | 1860 | 		/* | 
 | 1861 | 		 * We have done an ON -> OFF transition: | 
 | 1862 | 		 */ | 
 | 1863 | 		curr->hardirqs_enabled = 0; | 
 | 1864 | 		curr->hardirq_disable_ip = _RET_IP_; | 
 | 1865 | 		curr->hardirq_disable_event = ++curr->irq_events; | 
 | 1866 | 		debug_atomic_inc(&hardirqs_off_events); | 
 | 1867 | 	} else | 
 | 1868 | 		debug_atomic_inc(&redundant_hardirqs_off); | 
 | 1869 | } | 
 | 1870 |  | 
 | 1871 | EXPORT_SYMBOL(trace_hardirqs_off); | 
 | 1872 |  | 
 | 1873 | /* | 
 | 1874 |  * Softirqs will be enabled: | 
 | 1875 |  */ | 
 | 1876 | void trace_softirqs_on(unsigned long ip) | 
 | 1877 | { | 
 | 1878 | 	struct task_struct *curr = current; | 
 | 1879 |  | 
 | 1880 | 	if (unlikely(!debug_locks)) | 
 | 1881 | 		return; | 
 | 1882 |  | 
 | 1883 | 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 
 | 1884 | 		return; | 
 | 1885 |  | 
 | 1886 | 	if (curr->softirqs_enabled) { | 
 | 1887 | 		debug_atomic_inc(&redundant_softirqs_on); | 
 | 1888 | 		return; | 
 | 1889 | 	} | 
 | 1890 |  | 
 | 1891 | 	/* | 
 | 1892 | 	 * We'll do an OFF -> ON transition: | 
 | 1893 | 	 */ | 
 | 1894 | 	curr->softirqs_enabled = 1; | 
 | 1895 | 	curr->softirq_enable_ip = ip; | 
 | 1896 | 	curr->softirq_enable_event = ++curr->irq_events; | 
 | 1897 | 	debug_atomic_inc(&softirqs_on_events); | 
 | 1898 | 	/* | 
 | 1899 | 	 * We are going to turn softirqs on, so set the | 
 | 1900 | 	 * usage bit for all held locks, if hardirqs are | 
 | 1901 | 	 * enabled too: | 
 | 1902 | 	 */ | 
 | 1903 | 	if (curr->hardirqs_enabled) | 
 | 1904 | 		mark_held_locks(curr, 0, ip); | 
 | 1905 | } | 
 | 1906 |  | 
 | 1907 | /* | 
 | 1908 |  * Softirqs were disabled: | 
 | 1909 |  */ | 
 | 1910 | void trace_softirqs_off(unsigned long ip) | 
 | 1911 | { | 
 | 1912 | 	struct task_struct *curr = current; | 
 | 1913 |  | 
 | 1914 | 	if (unlikely(!debug_locks)) | 
 | 1915 | 		return; | 
 | 1916 |  | 
 | 1917 | 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 
 | 1918 | 		return; | 
 | 1919 |  | 
 | 1920 | 	if (curr->softirqs_enabled) { | 
 | 1921 | 		/* | 
 | 1922 | 		 * We have done an ON -> OFF transition: | 
 | 1923 | 		 */ | 
 | 1924 | 		curr->softirqs_enabled = 0; | 
 | 1925 | 		curr->softirq_disable_ip = ip; | 
 | 1926 | 		curr->softirq_disable_event = ++curr->irq_events; | 
 | 1927 | 		debug_atomic_inc(&softirqs_off_events); | 
 | 1928 | 		DEBUG_LOCKS_WARN_ON(!softirq_count()); | 
 | 1929 | 	} else | 
 | 1930 | 		debug_atomic_inc(&redundant_softirqs_off); | 
 | 1931 | } | 
 | 1932 |  | 
 | 1933 | #endif | 
 | 1934 |  | 
 | 1935 | /* | 
 | 1936 |  * Initialize a lock instance's lock-class mapping info: | 
 | 1937 |  */ | 
 | 1938 | void lockdep_init_map(struct lockdep_map *lock, const char *name, | 
| Peter Zijlstra | 4dfbb9d | 2006-10-11 01:45:14 -0400 | [diff] [blame] | 1939 | 		      struct lock_class_key *key, int subclass) | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1940 | { | 
 | 1941 | 	if (unlikely(!debug_locks)) | 
 | 1942 | 		return; | 
 | 1943 |  | 
 | 1944 | 	if (DEBUG_LOCKS_WARN_ON(!key)) | 
 | 1945 | 		return; | 
 | 1946 | 	if (DEBUG_LOCKS_WARN_ON(!name)) | 
 | 1947 | 		return; | 
 | 1948 | 	/* | 
 | 1949 | 	 * Sanity check, the lock-class key must be persistent: | 
 | 1950 | 	 */ | 
 | 1951 | 	if (!static_obj(key)) { | 
 | 1952 | 		printk("BUG: key %p not in .data!\n", key); | 
 | 1953 | 		DEBUG_LOCKS_WARN_ON(1); | 
 | 1954 | 		return; | 
 | 1955 | 	} | 
 | 1956 | 	lock->name = name; | 
 | 1957 | 	lock->key = key; | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 1958 | 	lock->class_cache = NULL; | 
| Peter Zijlstra | 4dfbb9d | 2006-10-11 01:45:14 -0400 | [diff] [blame] | 1959 | 	if (subclass) | 
 | 1960 | 		register_lock_class(lock, subclass, 1); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1961 | } | 
 | 1962 |  | 
 | 1963 | EXPORT_SYMBOL_GPL(lockdep_init_map); | 
 | 1964 |  | 
 | 1965 | /* | 
 | 1966 |  * This gets called for every mutex_lock*()/spin_lock*() operation. | 
 | 1967 |  * We maintain the dependency maps and validate the locking attempt: | 
 | 1968 |  */ | 
 | 1969 | static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | 
 | 1970 | 			  int trylock, int read, int check, int hardirqs_off, | 
 | 1971 | 			  unsigned long ip) | 
 | 1972 | { | 
 | 1973 | 	struct task_struct *curr = current; | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 1974 | 	struct lock_class *class = NULL; | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1975 | 	struct held_lock *hlock; | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1976 | 	unsigned int depth, id; | 
 | 1977 | 	int chain_head = 0; | 
 | 1978 | 	u64 chain_key; | 
 | 1979 |  | 
 | 1980 | 	if (unlikely(!debug_locks)) | 
 | 1981 | 		return 0; | 
 | 1982 |  | 
 | 1983 | 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 
 | 1984 | 		return 0; | 
 | 1985 |  | 
 | 1986 | 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { | 
 | 1987 | 		debug_locks_off(); | 
 | 1988 | 		printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n"); | 
 | 1989 | 		printk("turning off the locking correctness validator.\n"); | 
 | 1990 | 		return 0; | 
 | 1991 | 	} | 
 | 1992 |  | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 1993 | 	if (!subclass) | 
 | 1994 | 		class = lock->class_cache; | 
 | 1995 | 	/* | 
 | 1996 | 	 * Not cached yet or subclass? | 
 | 1997 | 	 */ | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1998 | 	if (unlikely(!class)) { | 
| Peter Zijlstra | 4dfbb9d | 2006-10-11 01:45:14 -0400 | [diff] [blame] | 1999 | 		class = register_lock_class(lock, subclass, 0); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2000 | 		if (!class) | 
 | 2001 | 			return 0; | 
 | 2002 | 	} | 
 | 2003 | 	debug_atomic_inc((atomic_t *)&class->ops); | 
 | 2004 | 	if (very_verbose(class)) { | 
 | 2005 | 		printk("\nacquire class [%p] %s", class->key, class->name); | 
 | 2006 | 		if (class->name_version > 1) | 
 | 2007 | 			printk("#%d", class->name_version); | 
 | 2008 | 		printk("\n"); | 
 | 2009 | 		dump_stack(); | 
 | 2010 | 	} | 
 | 2011 |  | 
 | 2012 | 	/* | 
 | 2013 | 	 * Add the lock to the list of currently held locks. | 
 | 2014 | 	 * (we don't increase the depth just yet, up until the | 
 | 2015 | 	 * dependency checks are done) | 
 | 2016 | 	 */ | 
 | 2017 | 	depth = curr->lockdep_depth; | 
 | 2018 | 	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) | 
 | 2019 | 		return 0; | 
 | 2020 |  | 
 | 2021 | 	hlock = curr->held_locks + depth; | 
 | 2022 |  | 
 | 2023 | 	hlock->class = class; | 
 | 2024 | 	hlock->acquire_ip = ip; | 
 | 2025 | 	hlock->instance = lock; | 
 | 2026 | 	hlock->trylock = trylock; | 
 | 2027 | 	hlock->read = read; | 
 | 2028 | 	hlock->check = check; | 
 | 2029 | 	hlock->hardirqs_off = hardirqs_off; | 
 | 2030 |  | 
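 |  | 	/* | 
 |  | 	 * Usage-bit marking (and, later, the chain-cache checks) are | 
 |  | 	 * only done for check == 2; lower check levels skip straight | 
 |  | 	 * to the chain-hash calculation: | 
 |  | 	 */ | 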
 | 2031 | 	if (check != 2) | 
 | 2032 | 		goto out_calc_hash; | 
 | 2033 | #ifdef CONFIG_TRACE_IRQFLAGS | 
 | 2034 | 	/* | 
 | 2035 | 	 * If non-trylock use in a hardirq or softirq context, then | 
 | 2036 | 	 * mark the lock as used in these contexts: | 
 | 2037 | 	 */ | 
 | 2038 | 	if (!trylock) { | 
 | 2039 | 		if (read) { | 
 | 2040 | 			if (curr->hardirq_context) | 
 | 2041 | 				if (!mark_lock(curr, hlock, | 
 | 2042 | 						LOCK_USED_IN_HARDIRQ_READ, ip)) | 
 | 2043 | 					return 0; | 
 | 2044 | 			if (curr->softirq_context) | 
 | 2045 | 				if (!mark_lock(curr, hlock, | 
 | 2046 | 						LOCK_USED_IN_SOFTIRQ_READ, ip)) | 
 | 2047 | 					return 0; | 
 | 2048 | 		} else { | 
 | 2049 | 			if (curr->hardirq_context) | 
 | 2050 | 				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ, ip)) | 
 | 2051 | 					return 0; | 
 | 2052 | 			if (curr->softirq_context) | 
 | 2053 | 				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ, ip)) | 
 | 2054 | 					return 0; | 
 | 2055 | 		} | 
 | 2056 | 	} | 
 | 2057 | 	if (!hardirqs_off) { | 
 | 2058 | 		if (read) { | 
 | 2059 | 			if (!mark_lock(curr, hlock, | 
 | 2060 | 					LOCK_ENABLED_HARDIRQS_READ, ip)) | 
 | 2061 | 				return 0; | 
 | 2062 | 			if (curr->softirqs_enabled) | 
 | 2063 | 				if (!mark_lock(curr, hlock, | 
 | 2064 | 						LOCK_ENABLED_SOFTIRQS_READ, ip)) | 
 | 2065 | 					return 0; | 
 | 2066 | 		} else { | 
 | 2067 | 			if (!mark_lock(curr, hlock, | 
 | 2068 | 					LOCK_ENABLED_HARDIRQS, ip)) | 
 | 2069 | 				return 0; | 
 | 2070 | 			if (curr->softirqs_enabled) | 
 | 2071 | 				if (!mark_lock(curr, hlock, | 
 | 2072 | 						LOCK_ENABLED_SOFTIRQS, ip)) | 
 | 2073 | 					return 0; | 
 | 2074 | 		} | 
 | 2075 | 	} | 
 | 2076 | #endif | 
 | 2077 | 	/* mark it as used: */ | 
 | 2078 | 	if (!mark_lock(curr, hlock, LOCK_USED, ip)) | 
 | 2079 | 		return 0; | 
 | 2080 | out_calc_hash: | 
 | 2081 | 	/* | 
 | 2082 | 	 * Calculate the chain hash: it's the combined hash of all the | 
 | 2083 | 	 * lock keys along the dependency chain. We save the hash value | 
 | 2084 | 	 * at every step so that we can get the current hash easily | 
 | 2085 | 	 * after unlock. The chain hash is then used to cache dependency | 
 | 2086 | 	 * results. | 
 | 2087 | 	 * | 
 | 2088 | 	 * The 'key ID' is the most compact key value we can use to | 
 | 2089 | 	 * drive the hash - not class->key. | 
 | 2090 | 	 */ | 
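 |  | 	/* | 
 |  | 	 * (Illustrative note: with classes A and B already held and C | 
 |  | 	 * being acquired, the key evolves as 0 -> | 
 |  | 	 * iterate_chain_key(0, id(A)) -> iterate_chain_key(., id(B)) -> | 
 |  | 	 * iterate_chain_key(., id(C)); hlock->prev_chain_key below saves | 
 |  | 	 * the value from just before this lock is folded in.) | 
 |  | 	 */ | 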
 | 2091 | 	id = class - lock_classes; | 
 | 2092 | 	if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) | 
 | 2093 | 		return 0; | 
 | 2094 |  | 
 | 2095 | 	chain_key = curr->curr_chain_key; | 
 | 2096 | 	if (!depth) { | 
 | 2097 | 		if (DEBUG_LOCKS_WARN_ON(chain_key != 0)) | 
 | 2098 | 			return 0; | 
 | 2099 | 		chain_head = 1; | 
 | 2100 | 	} | 
 | 2101 |  | 
 | 2102 | 	hlock->prev_chain_key = chain_key; | 
 | 2103 |  | 
 | 2104 | #ifdef CONFIG_TRACE_IRQFLAGS | 
 | 2105 | 	/* | 
 | 2106 | 	 * Keep track of points where we cross into an interrupt context: | 
 | 2107 | 	 */ | 
 | 2108 | 	hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) + | 
 | 2109 | 				curr->softirq_context; | 
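 |  | 	/* | 
 |  | 	 * (With this encoding, 0 means process context, 1 softirq | 
 |  | 	 * context and >= 2 hardirq context - only the distinction | 
 |  | 	 * between contexts matters here, not the exact count.) | 
 |  | 	 */ | 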
 | 2110 | 	if (depth) { | 
 | 2111 | 		struct held_lock *prev_hlock; | 
 | 2112 |  | 
 | 2113 | 		prev_hlock = curr->held_locks + depth-1; | 
 | 2114 | 		/* | 
 | 2115 | 		 * If we cross into another context, reset the | 
 | 2116 | 		 * hash key (this also prevents the checking and the | 
 | 2117 | 		 * adding of the dependency to 'prev'): | 
 | 2118 | 		 */ | 
 | 2119 | 		if (prev_hlock->irq_context != hlock->irq_context) { | 
 | 2120 | 			chain_key = 0; | 
 | 2121 | 			chain_head = 1; | 
 | 2122 | 		} | 
 | 2123 | 	} | 
 | 2124 | #endif | 
 | 2125 | 	chain_key = iterate_chain_key(chain_key, id); | 
 | 2126 | 	curr->curr_chain_key = chain_key; | 
 | 2127 |  | 
 | 2128 | 	/* | 
 | 2129 | 	 * Trylock needs to maintain the stack of held locks, but it | 
 | 2130 | 	 * does not add new dependencies, because trylock can be done | 
 | 2131 | 	 * in any order. | 
 | 2132 | 	 * | 
 | 2133 | 	 * We look up the chain_key and do the O(N^2) check and update of | 
 | 2134 | 	 * the dependencies only if this is a new dependency chain. | 
 | 2135 | 	 * (If lookup_chain_cache() returns 1 it has acquired | 
 | 2136 | 	 * hash_lock for us) | 
 | 2137 | 	 */ | 
 | 2138 | 	if (!trylock && (check == 2) && lookup_chain_cache(chain_key)) { | 
 | 2139 | 		/* | 
 | 2140 | 		 * Check whether last held lock: | 
 | 2141 | 		 * | 
 | 2142 | 		 * - is irq-safe, if this lock is irq-unsafe | 
 | 2143 | 		 * - is softirq-safe, if this lock is hardirq-unsafe | 
 | 2144 | 		 * | 
 | 2145 | 		 * And check whether the new lock's dependency graph | 
 | 2146 | 		 * could lead back to the previous lock. | 
 | 2147 | 		 * | 
 | 2148 | 		 * Any of these scenarios could lead to a deadlock. | 
 | 2150 | 		 */ | 
 | 2151 | 		int ret = check_deadlock(curr, hlock, lock, read); | 
 | 2152 |  | 
 | 2153 | 		if (!ret) | 
 | 2154 | 			return 0; | 
 | 2155 | 		/* | 
 | 2156 | 		 * Mark recursive read, as we jump over it when | 
 | 2157 | 		 * building dependencies (just like we jump over | 
 | 2158 | 		 * trylock entries): | 
 | 2159 | 		 */ | 
 | 2160 | 		if (ret == 2) | 
 | 2161 | 			hlock->read = 2; | 
 | 2162 | 		/* | 
 | 2163 | 		 * Add dependency only if this lock is not the head | 
 | 2164 | 		 * of the chain, and if it's not a secondary read-lock: | 
 | 2165 | 		 */ | 
 | 2166 | 		if (!chain_head && ret != 2) | 
 | 2167 | 			if (!check_prevs_add(curr, hlock)) | 
 | 2168 | 				return 0; | 
 | 2169 | 		__raw_spin_unlock(&hash_lock); | 
 | 2170 | 	} | 
 | 2171 | 	curr->lockdep_depth++; | 
 | 2172 | 	check_chain_key(curr); | 
 | 2173 | 	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { | 
 | 2174 | 		debug_locks_off(); | 
 | 2175 | 		printk("BUG: MAX_LOCK_DEPTH too low!\n"); | 
 | 2176 | 		printk("turning off the locking correctness validator.\n"); | 
 | 2177 | 		return 0; | 
 | 2178 | 	} | 
 | 2179 | 	if (unlikely(curr->lockdep_depth > max_lockdep_depth)) | 
 | 2180 | 		max_lockdep_depth = curr->lockdep_depth; | 
 | 2181 |  | 
 | 2182 | 	return 1; | 
 | 2183 | } | 
 | 2184 |  | 
 | 2185 | static int | 
 | 2186 | print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock, | 
 | 2187 | 			   unsigned long ip) | 
 | 2188 | { | 
 | 2189 | 	if (!debug_locks_off()) | 
 | 2190 | 		return 0; | 
 | 2191 | 	if (debug_locks_silent) | 
 | 2192 | 		return 0; | 
 | 2193 |  | 
 | 2194 | 	printk("\n=====================================\n"); | 
 | 2195 | 	printk(  "[ BUG: bad unlock balance detected! ]\n"); | 
 | 2196 | 	printk(  "-------------------------------------\n"); | 
 | 2197 | 	printk("%s/%d is trying to release lock (", | 
 | 2198 | 		curr->comm, curr->pid); | 
 | 2199 | 	print_lockdep_cache(lock); | 
 | 2200 | 	printk(") at:\n"); | 
 | 2201 | 	print_ip_sym(ip); | 
 | 2202 | 	printk("but there are no more locks to release!\n"); | 
 | 2203 | 	printk("\nother info that might help us debug this:\n"); | 
 | 2204 | 	lockdep_print_held_locks(curr); | 
 | 2205 |  | 
 | 2206 | 	printk("\nstack backtrace:\n"); | 
 | 2207 | 	dump_stack(); | 
 | 2208 |  | 
 | 2209 | 	return 0; | 
 | 2210 | } | 
 | 2211 |  | 
 | 2212 | /* | 
 | 2213 |  * Common debugging checks for both nested and non-nested unlock: | 
 | 2214 |  */ | 
 | 2215 | static int check_unlock(struct task_struct *curr, struct lockdep_map *lock, | 
 | 2216 | 			unsigned long ip) | 
 | 2217 | { | 
 | 2218 | 	if (unlikely(!debug_locks)) | 
 | 2219 | 		return 0; | 
 | 2220 | 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 
 | 2221 | 		return 0; | 
 | 2222 |  | 
 | 2223 | 	if (curr->lockdep_depth <= 0) | 
 | 2224 | 		return print_unlock_inbalance_bug(curr, lock, ip); | 
 | 2225 |  | 
 | 2226 | 	return 1; | 
 | 2227 | } | 
 | 2228 |  | 
 | 2229 | /* | 
 | 2230 |  * Remove the lock from the list of currently held locks in a | 
 | 2231 |  * potentially non-nested (out of order) manner. This is a | 
 | 2232 |  * relatively rare operation, as all the unlock APIs default | 
 | 2233 |  * to nested mode (which uses lock_release()): | 
 | 2234 |  */ | 
 | 2235 | static int | 
 | 2236 | lock_release_non_nested(struct task_struct *curr, | 
 | 2237 | 			struct lockdep_map *lock, unsigned long ip) | 
 | 2238 | { | 
 | 2239 | 	struct held_lock *hlock, *prev_hlock; | 
 | 2240 | 	unsigned int depth; | 
 | 2241 | 	int i; | 
 | 2242 |  | 
 | 2243 | 	/* | 
 | 2244 | 	 * Check whether the lock exists in the current stack | 
 | 2245 | 	 * of held locks: | 
 | 2246 | 	 */ | 
 | 2247 | 	depth = curr->lockdep_depth; | 
 | 2248 | 	if (DEBUG_LOCKS_WARN_ON(!depth)) | 
 | 2249 | 		return 0; | 
 | 2250 |  | 
 | 2251 | 	prev_hlock = NULL; | 
 | 2252 | 	for (i = depth-1; i >= 0; i--) { | 
 | 2253 | 		hlock = curr->held_locks + i; | 
 | 2254 | 		/* | 
 | 2255 | 		 * We must not cross into another context: | 
 | 2256 | 		 */ | 
 | 2257 | 		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) | 
 | 2258 | 			break; | 
 | 2259 | 		if (hlock->instance == lock) | 
 | 2260 | 			goto found_it; | 
 | 2261 | 		prev_hlock = hlock; | 
 | 2262 | 	} | 
 | 2263 | 	return print_unlock_inbalance_bug(curr, lock, ip); | 
 | 2264 |  | 
 | 2265 | found_it: | 
 | 2266 | 	/* | 
 | 2267 | 	 * We have the right lock to unlock, 'hlock' points to it. | 
 | 2268 | 	 * Now we remove it from the stack, and add back the other | 
 | 2269 | 	 * entries (if any), recalculating the hash along the way: | 
 | 2270 | 	 */ | 
 | 2271 | 	curr->lockdep_depth = i; | 
 | 2272 | 	curr->curr_chain_key = hlock->prev_chain_key; | 
 | 2273 |  | 
 | 2274 | 	for (i++; i < depth; i++) { | 
 | 2275 | 		hlock = curr->held_locks + i; | 
 | 2276 | 		if (!__lock_acquire(hlock->instance, | 
 | 2277 | 			hlock->class->subclass, hlock->trylock, | 
 | 2278 | 				hlock->read, hlock->check, hlock->hardirqs_off, | 
 | 2279 | 				hlock->acquire_ip)) | 
 | 2280 | 			return 0; | 
 | 2281 | 	} | 
 | 2282 |  | 
 | 2283 | 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) | 
 | 2284 | 		return 0; | 
 | 2285 | 	return 1; | 
 | 2286 | } | 
 | 2287 |  | 
 | 2288 | /* | 
 | 2289 |  * Remove the lock from the list of currently held locks - this gets | 
 | 2290 |  * called on mutex_unlock()/spin_unlock*() (or on a failed | 
 | 2291 |  * mutex_lock_interruptible()). This is done for unlocks that nest | 
 | 2292 |  * perfectly. (i.e. the current top of the lock-stack is unlocked) | 
 | 2293 |  */ | 
 | 2294 | static int lock_release_nested(struct task_struct *curr, | 
 | 2295 | 			       struct lockdep_map *lock, unsigned long ip) | 
 | 2296 | { | 
 | 2297 | 	struct held_lock *hlock; | 
 | 2298 | 	unsigned int depth; | 
 | 2299 |  | 
 | 2300 | 	/* | 
 | 2301 | 	 * Pop off the top of the lock stack: | 
 | 2302 | 	 */ | 
 | 2303 | 	depth = curr->lockdep_depth - 1; | 
 | 2304 | 	hlock = curr->held_locks + depth; | 
 | 2305 |  | 
 | 2306 | 	/* | 
 | 2307 | 	 * Is the unlock non-nested: | 
 | 2308 | 	 */ | 
 | 2309 | 	if (hlock->instance != lock) | 
 | 2310 | 		return lock_release_non_nested(curr, lock, ip); | 
 | 2311 | 	curr->lockdep_depth--; | 
 | 2312 |  | 
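 |  | 	/* | 
 |  | 	 * The bottom-most held lock must have been taken with an | 
 |  | 	 * empty chain key: | 
 |  | 	 */ | 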
 | 2313 | 	if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0))) | 
 | 2314 | 		return 0; | 
 | 2315 |  | 
 | 2316 | 	curr->curr_chain_key = hlock->prev_chain_key; | 
 | 2317 |  | 
 | 2318 | #ifdef CONFIG_DEBUG_LOCKDEP | 
 | 2319 | 	hlock->prev_chain_key = 0; | 
 | 2320 | 	hlock->class = NULL; | 
 | 2321 | 	hlock->acquire_ip = 0; | 
 | 2322 | 	hlock->irq_context = 0; | 
 | 2323 | #endif | 
 | 2324 | 	return 1; | 
 | 2325 | } | 
 | 2326 |  | 
 | 2327 | /* | 
 | 2328 |  * Remove the lock from the list of currently held locks - this gets | 
 | 2329 |  * called on mutex_unlock()/spin_unlock*() (or on a failed | 
 | 2330 |  * mutex_lock_interruptible()). Both perfectly nested and (rarely) | 
 | 2331 |  * non-nested unlocks are handled here, depending on 'nested'. | 
 | 2332 |  */ | 
 | 2333 | static void | 
 | 2334 | __lock_release(struct lockdep_map *lock, int nested, unsigned long ip) | 
 | 2335 | { | 
 | 2336 | 	struct task_struct *curr = current; | 
 | 2337 |  | 
 | 2338 | 	if (!check_unlock(curr, lock, ip)) | 
 | 2339 | 		return; | 
 | 2340 |  | 
 | 2341 | 	if (nested) { | 
 | 2342 | 		if (!lock_release_nested(curr, lock, ip)) | 
 | 2343 | 			return; | 
 | 2344 | 	} else { | 
 | 2345 | 		if (!lock_release_non_nested(curr, lock, ip)) | 
 | 2346 | 			return; | 
 | 2347 | 	} | 
 | 2348 |  | 
 | 2349 | 	check_chain_key(curr); | 
 | 2350 | } | 
 | 2351 |  | 
 | 2352 | /* | 
 | 2353 |  * Check whether we follow the irq-flags state precisely: | 
 | 2354 |  */ | 
 | 2355 | static void check_flags(unsigned long flags) | 
 | 2356 | { | 
 | 2357 | #if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS) | 
 | 2358 | 	if (!debug_locks) | 
 | 2359 | 		return; | 
 | 2360 |  | 
 | 2361 | 	if (irqs_disabled_flags(flags)) | 
 | 2362 | 		DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled); | 
 | 2363 | 	else | 
 | 2364 | 		DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled); | 
 | 2365 |  | 
 | 2366 | 	/* | 
 | 2367 | 	 * We don't accurately track softirq state in e.g. | 
 | 2368 | 	 * hardirq contexts (such as on 4KSTACKS), so only | 
 | 2369 | 	 * check if not in hardirq contexts: | 
 | 2370 | 	 */ | 
 | 2371 | 	if (!hardirq_count()) { | 
 | 2372 | 		if (softirq_count()) | 
 | 2373 | 			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); | 
 | 2374 | 		else | 
 | 2375 | 			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); | 
 | 2376 | 	} | 
 | 2377 |  | 
 | 2378 | 	if (!debug_locks) | 
 | 2379 | 		print_irqtrace_events(current); | 
 | 2380 | #endif | 
 | 2381 | } | 
 | 2382 |  | 
 | 2383 | /* | 
 | 2384 |  * We are not always called with irqs disabled - do that here, | 
 | 2385 |  * and also avoid lockdep recursion: | 
 | 2386 |  */ | 
 | 2387 | void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | 
 | 2388 | 		  int trylock, int read, int check, unsigned long ip) | 
 | 2389 | { | 
 | 2390 | 	unsigned long flags; | 
 | 2391 |  | 
 | 2392 | 	if (unlikely(current->lockdep_recursion)) | 
 | 2393 | 		return; | 
 | 2394 |  | 
 | 2395 | 	raw_local_irq_save(flags); | 
 | 2396 | 	check_flags(flags); | 
 | 2397 |  | 
 | 2398 | 	current->lockdep_recursion = 1; | 
 | 2399 | 	__lock_acquire(lock, subclass, trylock, read, check, | 
 | 2400 | 		       irqs_disabled_flags(flags), ip); | 
 | 2401 | 	current->lockdep_recursion = 0; | 
 | 2402 | 	raw_local_irq_restore(flags); | 
 | 2403 | } | 
 | 2404 |  | 
 | 2405 | EXPORT_SYMBOL_GPL(lock_acquire); | 
 | 2406 |  | 
 | 2407 | void lock_release(struct lockdep_map *lock, int nested, unsigned long ip) | 
 | 2408 | { | 
 | 2409 | 	unsigned long flags; | 
 | 2410 |  | 
 | 2411 | 	if (unlikely(current->lockdep_recursion)) | 
 | 2412 | 		return; | 
 | 2413 |  | 
 | 2414 | 	raw_local_irq_save(flags); | 
 | 2415 | 	check_flags(flags); | 
 | 2416 | 	current->lockdep_recursion = 1; | 
 | 2417 | 	__lock_release(lock, nested, ip); | 
 | 2418 | 	current->lockdep_recursion = 0; | 
 | 2419 | 	raw_local_irq_restore(flags); | 
 | 2420 | } | 
 | 2421 |  | 
 | 2422 | EXPORT_SYMBOL_GPL(lock_release); | 
 | 2423 |  | 
 | 2424 | /* | 
 | 2425 |  * Used by the testsuite, sanitize the validator state | 
 | 2426 |  * after a simulated failure: | 
 | 2427 |  */ | 
 | 2428 |  | 
 | 2429 | void lockdep_reset(void) | 
 | 2430 | { | 
 | 2431 | 	unsigned long flags; | 
 | 2432 |  | 
 | 2433 | 	raw_local_irq_save(flags); | 
 | 2434 | 	current->curr_chain_key = 0; | 
 | 2435 | 	current->lockdep_depth = 0; | 
 | 2436 | 	current->lockdep_recursion = 0; | 
 | 2437 | 	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock)); | 
 | 2438 | 	nr_hardirq_chains = 0; | 
 | 2439 | 	nr_softirq_chains = 0; | 
 | 2440 | 	nr_process_chains = 0; | 
 | 2441 | 	debug_locks = 1; | 
 | 2442 | 	raw_local_irq_restore(flags); | 
 | 2443 | } | 
 | 2444 |  | 
 | 2445 | static void zap_class(struct lock_class *class) | 
 | 2446 | { | 
 | 2447 | 	int i; | 
 | 2448 |  | 
 | 2449 | 	/* | 
 | 2450 | 	 * Remove all dependencies this lock is | 
 | 2451 | 	 * involved in: | 
 | 2452 | 	 */ | 
 | 2453 | 	for (i = 0; i < nr_list_entries; i++) { | 
 | 2454 | 		if (list_entries[i].class == class) | 
 | 2455 | 			list_del_rcu(&list_entries[i].entry); | 
 | 2456 | 	} | 
 | 2457 | 	/* | 
 | 2458 | 	 * Unhash the class and remove it from the all_lock_classes list: | 
 | 2459 | 	 */ | 
 | 2460 | 	list_del_rcu(&class->hash_entry); | 
 | 2461 | 	list_del_rcu(&class->lock_entry); | 
 | 2462 |  | 
 | 2463 | } | 
 | 2464 |  | 
 | 2465 | static inline int within(void *addr, void *start, unsigned long size) | 
 | 2466 | { | 
 | 2467 | 	return addr >= start && addr < start + size; | 
 | 2468 | } | 
 | 2469 |  | 
 | 2470 | void lockdep_free_key_range(void *start, unsigned long size) | 
 | 2471 | { | 
 | 2472 | 	struct lock_class *class, *next; | 
 | 2473 | 	struct list_head *head; | 
 | 2474 | 	unsigned long flags; | 
 | 2475 | 	int i; | 
 | 2476 |  | 
 | 2477 | 	raw_local_irq_save(flags); | 
 | 2478 | 	__raw_spin_lock(&hash_lock); | 
 | 2479 |  | 
 | 2480 | 	/* | 
 | 2481 | 	 * Unhash all classes that were created by this module: | 
 | 2482 | 	 */ | 
 | 2483 | 	for (i = 0; i < CLASSHASH_SIZE; i++) { | 
 | 2484 | 		head = classhash_table + i; | 
 | 2485 | 		if (list_empty(head)) | 
 | 2486 | 			continue; | 
 | 2487 | 		list_for_each_entry_safe(class, next, head, hash_entry) | 
 | 2488 | 			if (within(class->key, start, size)) | 
 | 2489 | 				zap_class(class); | 
 | 2490 | 	} | 
 | 2491 |  | 
 | 2492 | 	__raw_spin_unlock(&hash_lock); | 
 | 2493 | 	raw_local_irq_restore(flags); | 
 | 2494 | } | 
 | 2495 |  | 
 | 2496 | void lockdep_reset_lock(struct lockdep_map *lock) | 
 | 2497 | { | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 2498 | 	struct lock_class *class, *next; | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2499 | 	struct list_head *head; | 
 | 2500 | 	unsigned long flags; | 
 | 2501 | 	int i, j; | 
 | 2502 |  | 
 | 2503 | 	raw_local_irq_save(flags); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2504 |  | 
 | 2505 | 	/* | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 2506 | 	 * Remove all classes this lock might have: | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2507 | 	 */ | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 2508 | 	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) { | 
 | 2509 | 		/* | 
 | 2510 | 		 * If the class exists we look it up and zap it: | 
 | 2511 | 		 */ | 
 | 2512 | 		class = look_up_lock_class(lock, j); | 
 | 2513 | 		if (class) | 
 | 2514 | 			zap_class(class); | 
 | 2515 | 	} | 
 | 2516 | 	/* | 
 | 2517 | 	 * Debug check: in the end all mapped classes should | 
 | 2518 | 	 * be gone. | 
 | 2519 | 	 */ | 
 | 2520 | 	__raw_spin_lock(&hash_lock); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2521 | 	for (i = 0; i < CLASSHASH_SIZE; i++) { | 
 | 2522 | 		head = classhash_table + i; | 
 | 2523 | 		if (list_empty(head)) | 
 | 2524 | 			continue; | 
 | 2525 | 		list_for_each_entry_safe(class, next, head, hash_entry) { | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 2526 | 			if (unlikely(class == lock->class_cache)) { | 
 | 2527 | 				__raw_spin_unlock(&hash_lock); | 
 | 2528 | 				DEBUG_LOCKS_WARN_ON(1); | 
 | 2529 | 				goto out_restore; | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2530 | 			} | 
 | 2531 | 		} | 
 | 2532 | 	} | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2533 | 	__raw_spin_unlock(&hash_lock); | 
| Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 2534 |  | 
 | 2535 | out_restore: | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2536 | 	raw_local_irq_restore(flags); | 
 | 2537 | } | 
 | 2538 |  | 
 | 2539 | void __init lockdep_init(void) | 
 | 2540 | { | 
 | 2541 | 	int i; | 
 | 2542 |  | 
 | 2543 | 	/* | 
 | 2544 | 	 * Some architectures have their own start_kernel() | 
 | 2545 | 	 * code which calls lockdep_init(), while we also | 
 | 2546 | 	 * call lockdep_init() from start_kernel() itself, | 
 | 2547 | 	 * and we want to initialize the hashes only once: | 
 | 2548 | 	 */ | 
 | 2549 | 	if (lockdep_initialized) | 
 | 2550 | 		return; | 
 | 2551 |  | 
 | 2552 | 	for (i = 0; i < CLASSHASH_SIZE; i++) | 
 | 2553 | 		INIT_LIST_HEAD(classhash_table + i); | 
 | 2554 |  | 
 | 2555 | 	for (i = 0; i < CHAINHASH_SIZE; i++) | 
 | 2556 | 		INIT_LIST_HEAD(chainhash_table + i); | 
 | 2557 |  | 
 | 2558 | 	lockdep_initialized = 1; | 
 | 2559 | } | 
 | 2560 |  | 
 | 2561 | void __init lockdep_info(void) | 
 | 2562 | { | 
 | 2563 | 	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); | 
 | 2564 |  | 
 | 2565 | 	printk("... MAX_LOCKDEP_SUBCLASSES:    %lu\n", MAX_LOCKDEP_SUBCLASSES); | 
 | 2566 | 	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH); | 
 | 2567 | 	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS); | 
 | 2568 | 	printk("... CLASSHASH_SIZE:           %lu\n", CLASSHASH_SIZE); | 
 | 2569 | 	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES); | 
 | 2570 | 	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS); | 
 | 2571 | 	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE); | 
 | 2572 |  | 
 | 2573 | 	printk(" memory used by lock dependency info: %lu kB\n", | 
 | 2574 | 		(sizeof(struct lock_class) * MAX_LOCKDEP_KEYS + | 
 | 2575 | 		sizeof(struct list_head) * CLASSHASH_SIZE + | 
 | 2576 | 		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES + | 
 | 2577 | 		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS + | 
 | 2578 | 		sizeof(struct list_head) * CHAINHASH_SIZE) / 1024); | 
 | 2579 |  | 
 | 2580 | 	printk(" per task-struct memory footprint: %lu bytes\n", | 
 | 2581 | 		sizeof(struct held_lock) * MAX_LOCK_DEPTH); | 
 | 2582 |  | 
 | 2583 | #ifdef CONFIG_DEBUG_LOCKDEP | 
 | 2584 | 	if (lockdep_init_error) | 
 | 2585 | 		printk("WARNING: lockdep init error! Arch code didnt call lockdep_init() early enough?\n"); | 
 | 2586 | #endif | 
 | 2587 | } | 
 | 2588 |  | 
 | 2589 | static inline int in_range(const void *start, const void *addr, const void *end) | 
 | 2590 | { | 
 | 2591 | 	return addr >= start && addr <= end; | 
 | 2592 | } | 
 | 2593 |  | 
 | 2594 | static void | 
 | 2595 | print_freed_lock_bug(struct task_struct *curr, const void *mem_from, | 
| Arjan van de Ven | 55794a4 | 2006-07-10 04:44:03 -0700 | [diff] [blame] | 2596 | 		     const void *mem_to, struct held_lock *hlock) | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2597 | { | 
 | 2598 | 	if (!debug_locks_off()) | 
 | 2599 | 		return; | 
 | 2600 | 	if (debug_locks_silent) | 
 | 2601 | 		return; | 
 | 2602 |  | 
 | 2603 | 	printk("\n=========================\n"); | 
 | 2604 | 	printk(  "[ BUG: held lock freed! ]\n"); | 
 | 2605 | 	printk(  "-------------------------\n"); | 
 | 2606 | 	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n", | 
 | 2607 | 		curr->comm, curr->pid, mem_from, mem_to-1); | 
| Arjan van de Ven | 55794a4 | 2006-07-10 04:44:03 -0700 | [diff] [blame] | 2608 | 	print_lock(hlock); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2609 | 	lockdep_print_held_locks(curr); | 
 | 2610 |  | 
 | 2611 | 	printk("\nstack backtrace:\n"); | 
 | 2612 | 	dump_stack(); | 
 | 2613 | } | 
 | 2614 |  | 
 | 2615 | /* | 
 | 2616 |  * Called when kernel memory is freed (or unmapped), or if a lock | 
 | 2617 |  * is destroyed or reinitialized - this code checks whether there is | 
 | 2618 |  * any held lock in the memory range of <from> to <to>: | 
 | 2619 |  */ | 
 | 2620 | void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) | 
 | 2621 | { | 
 | 2622 | 	const void *mem_to = mem_from + mem_len, *lock_from, *lock_to; | 
 | 2623 | 	struct task_struct *curr = current; | 
 | 2624 | 	struct held_lock *hlock; | 
 | 2625 | 	unsigned long flags; | 
 | 2626 | 	int i; | 
 | 2627 |  | 
 | 2628 | 	if (unlikely(!debug_locks)) | 
 | 2629 | 		return; | 
 | 2630 |  | 
 | 2631 | 	local_irq_save(flags); | 
 | 2632 | 	for (i = 0; i < curr->lockdep_depth; i++) { | 
 | 2633 | 		hlock = curr->held_locks + i; | 
 | 2634 |  | 
 | 2635 | 		lock_from = (void *)hlock->instance; | 
 | 2636 | 		lock_to = (void *)(hlock->instance + 1); | 
 | 2637 |  | 
 | 2638 | 		if (!in_range(mem_from, lock_from, mem_to) && | 
 | 2639 | 					!in_range(mem_from, lock_to, mem_to)) | 
 | 2640 | 			continue; | 
 | 2641 |  | 
| Arjan van de Ven | 55794a4 | 2006-07-10 04:44:03 -0700 | [diff] [blame] | 2642 | 		print_freed_lock_bug(curr, mem_from, mem_to, hlock); | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2643 | 		break; | 
 | 2644 | 	} | 
 | 2645 | 	local_irq_restore(flags); | 
 | 2646 | } | 
 | 2647 |  | 
 | 2648 | static void print_held_locks_bug(struct task_struct *curr) | 
 | 2649 | { | 
 | 2650 | 	if (!debug_locks_off()) | 
 | 2651 | 		return; | 
 | 2652 | 	if (debug_locks_silent) | 
 | 2653 | 		return; | 
 | 2654 |  | 
 | 2655 | 	printk("\n=====================================\n"); | 
 | 2656 | 	printk(  "[ BUG: lock held at task exit time! ]\n"); | 
 | 2657 | 	printk(  "-------------------------------------\n"); | 
 | 2658 | 	printk("%s/%d is exiting with locks still held!\n", | 
 | 2659 | 		curr->comm, curr->pid); | 
 | 2660 | 	lockdep_print_held_locks(curr); | 
 | 2661 |  | 
 | 2662 | 	printk("\nstack backtrace:\n"); | 
 | 2663 | 	dump_stack(); | 
 | 2664 | } | 
 | 2665 |  | 
 | 2666 | void debug_check_no_locks_held(struct task_struct *task) | 
 | 2667 | { | 
 | 2668 | 	if (unlikely(task->lockdep_depth > 0)) | 
 | 2669 | 		print_held_locks_bug(task); | 
 | 2670 | } | 
 | 2671 |  | 
 | 2672 | void debug_show_all_locks(void) | 
 | 2673 | { | 
 | 2674 | 	struct task_struct *g, *p; | 
 | 2675 | 	int count = 10; | 
 | 2676 | 	int unlock = 1; | 
 | 2677 |  | 
 | 2678 | 	printk("\nShowing all locks held in the system:\n"); | 
 | 2679 |  | 
 | 2680 | 	/* | 
 | 2681 | 	 * Here we try to get the tasklist_lock as hard as possible, | 
 | 2682 | 	 * if not successful after 2 seconds we ignore it (but keep | 
 | 2683 | 	 * trying). This is to enable a debug printout even if a | 
 | 2684 | 	 * tasklist_lock-holding task deadlocks or crashes. | 
 | 2685 | 	 */ | 
 | 2686 | retry: | 
 | 2687 | 	if (!read_trylock(&tasklist_lock)) { | 
 | 2688 | 		if (count == 10) | 
 | 2689 | 			printk("hm, tasklist_lock locked, retrying... "); | 
 | 2690 | 		if (count) { | 
 | 2691 | 			count--; | 
 | 2692 | 			printk(" #%d", 10-count); | 
 | 2693 | 			mdelay(200); | 
 | 2694 | 			goto retry; | 
 | 2695 | 		} | 
 | 2696 | 		printk(" ignoring it.\n"); | 
 | 2697 | 		unlock = 0; | 
 | 2698 | 	} | 
 | 2699 | 	if (count != 10) | 
 | 2700 | 		printk(" locked it.\n"); | 
 | 2701 |  | 
 | 2702 | 	do_each_thread(g, p) { | 
 | 2703 | 		if (p->lockdep_depth) | 
 | 2704 | 			lockdep_print_held_locks(p); | 
 | 2705 | 		if (!unlock) | 
 | 2706 | 			if (read_trylock(&tasklist_lock)) | 
 | 2707 | 				unlock = 1; | 
 | 2708 | 	} while_each_thread(g, p); | 
 | 2709 |  | 
 | 2710 | 	printk("\n"); | 
 | 2711 | 	printk("=============================================\n\n"); | 
 | 2712 |  | 
 | 2713 | 	if (unlock) | 
 | 2714 | 		read_unlock(&tasklist_lock); | 
 | 2715 | } | 
 | 2716 |  | 
 | 2717 | EXPORT_SYMBOL_GPL(debug_show_all_locks); | 
 | 2718 |  | 
 | 2719 | void debug_show_held_locks(struct task_struct *task) | 
 | 2720 | { | 
 | 2721 | 	lockdep_print_held_locks(task); | 
 | 2722 | } | 
 | 2723 |  | 
 | 2724 | EXPORT_SYMBOL_GPL(debug_show_held_locks); | 
 | 2725 |  |