/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};
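
/*
 * Illustrative expansion (a sketch, not compiled here): assuming
 * lockdep_states.h lists LOCKDEP_STATE(HARDIRQ) as its first entry,
 * the enum above begins:
 *
 *	LOCK_USED_IN_HARDIRQ,
 *	LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ,
 *	LOCK_ENABLED_HARDIRQ_READ,
 *	...
 */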

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};
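
/*
 * Sketch of the corresponding bitmask expansion: each usage bit above
 * gets a LOCKF_* mask with that single bit set, e.g. (again assuming
 * HARDIRQ is the first state):
 *
 *	LOCKF_USED_IN_HARDIRQ = (1 << LOCK_USED_IN_HARDIRQ),
 *	LOCKF_USED_IN_HARDIRQ_READ = (1 << LOCK_USED_IN_HARDIRQ_READ),
 *	...
 */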

#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
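
/*
 * A typical (illustrative) test against these combined masks, along
 * the lines of what the validator does when a lock class is about to
 * become hardirq/softirq-enabled:
 *
 *	if (class->usage_mask & LOCKF_USED_IN_IRQ)
 *		// report a possible irq-unsafe -> irq-safe inversion
 */
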
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency list (if it's not there yet), and we check them for
 * lock order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL

#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
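
/*
 * The factor of 5 is presumably an assumed average number of held
 * locks per chain: it sizes the shared array of held-lock class
 * entries backing all chains, not a per-chain limit.
 */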

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	262144UL

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
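
/*
 * Sizing sketch: get_usage_chars() emits two characters per tracked
 * state (one for the write usage, one for the read usage) plus a
 * trailing '\0', which is where 1 + LOCK_USAGE_STATES/2 comes from.
 * Assuming the three states HARDIRQ, SOFTIRQ and RECLAIM_FS, that is
 * 1 + 13/2 = 7 characters.
 */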

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-CPU, as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr)

#define debug_atomic_inc(ptr)			do {		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
} while (0)

#define debug_atomic_dec(ptr)			do {		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
} while (0)
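
/*
 * Illustrative call sites (a sketch of how the update side is meant
 * to be used, with IRQs already disabled on the fast path):
 *
 *	debug_atomic_inc(chain_lookup_misses);
 *	debug_atomic_inc(hardirqs_on_events);
 */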

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
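
/*
 * Reading is slow-path only (e.g. the /proc/lockdep_stats dump), so
 * it sums the per-CPU counters instead of maintaining a global
 * atomic; a usage sketch:
 *
 *	seq_printf(m, " chain lookup hits: %11llu\n",
 *		   debug_atomic_read(chain_lookup_hits));
 */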
#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif