| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* | 
 | 2 |  *	Routines to manage notifier chains for passing status changes to any | 
 | 3 |  *	interested routines. We need this instead of hard coded call lists so | 
 | 4 |  *	that modules can poke their nose into the innards. The network devices | 
 | 5 |  *	needed them so here they are for the rest of you. | 
 | 6 |  * | 
 | 7 |  *				Alan Cox <Alan.Cox@linux.org> | 
 | 8 |  */ | 
 | 9 |   | 
 | 10 | #ifndef _LINUX_NOTIFIER_H | 
 | 11 | #define _LINUX_NOTIFIER_H | 
 | 12 | #include <linux/errno.h> | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 13 | #include <linux/mutex.h> | 
 | 14 | #include <linux/rwsem.h> | 
| Alan Stern | eabc069 | 2006-10-04 02:17:04 -0700 | [diff] [blame] | 15 | #include <linux/srcu.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 16 |  | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 17 | /* | 
| Alan Stern | eabc069 | 2006-10-04 02:17:04 -0700 | [diff] [blame] | 18 |  * Notifier chains are of four types: | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 19 |  * | 
 | 20 |  *	Atomic notifier chains: Chain callbacks run in interrupt/atomic | 
 | 21 |  *		context. Callouts are not allowed to block. | 
 | 22 |  *	Blocking notifier chains: Chain callbacks run in process context. | 
 | 23 |  *		Callouts are allowed to block. | 
 | 24 |  *	Raw notifier chains: There are no restrictions on callbacks, | 
 | 25 |  *		registration, or unregistration.  All locking and protection | 
 | 26 |  *		must be provided by the caller. | 
| Alan Stern | eabc069 | 2006-10-04 02:17:04 -0700 | [diff] [blame] | 27 |  *	SRCU notifier chains: A variant of blocking notifier chains, with | 
 | 28 |  *		the same restrictions. | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 29 |  * | 
 | 30 |  * atomic_notifier_chain_register() may be called from an atomic context, | 
| Alan Stern | eabc069 | 2006-10-04 02:17:04 -0700 | [diff] [blame] | 31 |  * but blocking_notifier_chain_register() and srcu_notifier_chain_register() | 
 | 32 |  * must be called from a process context.  Ditto for the corresponding | 
 | 33 |  * _unregister() routines. | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 34 |  * | 
| Alan Stern | eabc069 | 2006-10-04 02:17:04 -0700 | [diff] [blame] | 35 |  * atomic_notifier_chain_unregister(), blocking_notifier_chain_unregister(), | 
 | 36 |  * and srcu_notifier_chain_unregister() _must not_ be called from within | 
 | 37 |  * the call chain. | 
 | 38 |  * | 
 | 39 |  * SRCU notifier chains are an alternative form of blocking notifier chains. | 
 | 40 |  * They use SRCU (Sleepable Read-Copy Update) instead of rw-semaphores for | 
 | 41 |  * protection of the chain links.  This means there is _very_ low overhead | 
 | 42 |  * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. | 
 | 43 |  * As compensation, srcu_notifier_chain_unregister() is rather expensive. | 
 | 44 |  * SRCU notifier chains should be used when the chain will be called very | 
 | 45 |  * often but notifier_blocks will seldom be removed.  Also, SRCU notifier | 
 | 46 |  * chains are slightly more difficult to use because they require special | 
 | 47 |  * runtime initialization. | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 48 |  */ | 
 | 49 |  | 
 | 50 | struct notifier_block { | 
 | 51 | 	int (*notifier_call)(struct notifier_block *, unsigned long, void *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 52 | 	struct notifier_block *next; | 
 | 53 | 	int priority; | 
 | 54 | }; | 
 | 55 |  | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 56 | struct atomic_notifier_head { | 
 | 57 | 	spinlock_t lock; | 
 | 58 | 	struct notifier_block *head; | 
 | 59 | }; | 
 | 60 |  | 
 | 61 | struct blocking_notifier_head { | 
 | 62 | 	struct rw_semaphore rwsem; | 
 | 63 | 	struct notifier_block *head; | 
 | 64 | }; | 
 | 65 |  | 
 | 66 | struct raw_notifier_head { | 
 | 67 | 	struct notifier_block *head; | 
 | 68 | }; | 
 | 69 |  | 
| Alan Stern | eabc069 | 2006-10-04 02:17:04 -0700 | [diff] [blame] | 70 | struct srcu_notifier_head { | 
 | 71 | 	struct mutex mutex; | 
 | 72 | 	struct srcu_struct srcu; | 
 | 73 | 	struct notifier_block *head; | 
 | 74 | }; | 
 | 75 |  | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 76 | #define ATOMIC_INIT_NOTIFIER_HEAD(name) do {	\ | 
 | 77 | 		spin_lock_init(&(name)->lock);	\ | 
 | 78 | 		(name)->head = NULL;		\ | 
 | 79 | 	} while (0) | 
 | 80 | #define BLOCKING_INIT_NOTIFIER_HEAD(name) do {	\ | 
 | 81 | 		init_rwsem(&(name)->rwsem);	\ | 
 | 82 | 		(name)->head = NULL;		\ | 
 | 83 | 	} while (0) | 
 | 84 | #define RAW_INIT_NOTIFIER_HEAD(name) do {	\ | 
 | 85 | 		(name)->head = NULL;		\ | 
 | 86 | 	} while (0) | 
 | 87 |  | 
| Alan Stern | eabc069 | 2006-10-04 02:17:04 -0700 | [diff] [blame] | 88 | /* srcu_notifier_heads must be initialized and cleaned up dynamically */ | 
 | 89 | extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); | 
 | 90 | #define srcu_cleanup_notifier_head(name)	\ | 
 | 91 | 		cleanup_srcu_struct(&(name)->srcu); | 
 | 92 |  | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 93 | #define ATOMIC_NOTIFIER_INIT(name) {				\ | 
| Ingo Molnar | e4d9191 | 2006-07-03 00:24:34 -0700 | [diff] [blame] | 94 | 		.lock = __SPIN_LOCK_UNLOCKED(name.lock),	\ | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 95 | 		.head = NULL } | 
 | 96 | #define BLOCKING_NOTIFIER_INIT(name) {				\ | 
 | 97 | 		.rwsem = __RWSEM_INITIALIZER((name).rwsem),	\ | 
 | 98 | 		.head = NULL } | 
 | 99 | #define RAW_NOTIFIER_INIT(name)	{				\ | 
 | 100 | 		.head = NULL } | 
| Alan Stern | eabc069 | 2006-10-04 02:17:04 -0700 | [diff] [blame] | 101 | /* srcu_notifier_heads cannot be initialized statically */ | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 102 |  | 
 | 103 | #define ATOMIC_NOTIFIER_HEAD(name)				\ | 
 | 104 | 	struct atomic_notifier_head name =			\ | 
 | 105 | 		ATOMIC_NOTIFIER_INIT(name) | 
 | 106 | #define BLOCKING_NOTIFIER_HEAD(name)				\ | 
 | 107 | 	struct blocking_notifier_head name =			\ | 
 | 108 | 		BLOCKING_NOTIFIER_INIT(name) | 
 | 109 | #define RAW_NOTIFIER_HEAD(name)					\ | 
 | 110 | 	struct raw_notifier_head name =				\ | 
 | 111 | 		RAW_NOTIFIER_INIT(name) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 112 |  | 
 | 113 | #ifdef __KERNEL__ | 
 | 114 |  | 
| Gautham R Shenoy | 6f7cc11 | 2007-05-09 02:34:02 -0700 | [diff] [blame] | 115 | extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, | 
 | 116 | 		struct notifier_block *nb); | 
 | 117 | extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh, | 
 | 118 | 		struct notifier_block *nb); | 
 | 119 | extern int raw_notifier_chain_register(struct raw_notifier_head *nh, | 
 | 120 | 		struct notifier_block *nb); | 
 | 121 | extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh, | 
 | 122 | 		struct notifier_block *nb); | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 123 |  | 
| Nadia Derbey | 6546bc4 | 2008-04-29 01:00:45 -0700 | [diff] [blame] | 124 | extern int blocking_notifier_chain_cond_register( | 
 | 125 | 		struct blocking_notifier_head *nh, | 
 | 126 | 		struct notifier_block *nb); | 
 | 127 |  | 
| Gautham R Shenoy | 6f7cc11 | 2007-05-09 02:34:02 -0700 | [diff] [blame] | 128 | extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, | 
 | 129 | 		struct notifier_block *nb); | 
 | 130 | extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, | 
 | 131 | 		struct notifier_block *nb); | 
 | 132 | extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh, | 
 | 133 | 		struct notifier_block *nb); | 
 | 134 | extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, | 
 | 135 | 		struct notifier_block *nb); | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 136 |  | 
| Gautham R Shenoy | 6f7cc11 | 2007-05-09 02:34:02 -0700 | [diff] [blame] | 137 | extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh, | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 138 | 		unsigned long val, void *v); | 
| Gautham R Shenoy | 6f7cc11 | 2007-05-09 02:34:02 -0700 | [diff] [blame] | 139 | extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, | 
 | 140 | 	unsigned long val, void *v, int nr_to_call, int *nr_calls); | 
 | 141 | extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh, | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 142 | 		unsigned long val, void *v); | 
| Gautham R Shenoy | 6f7cc11 | 2007-05-09 02:34:02 -0700 | [diff] [blame] | 143 | extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, | 
 | 144 | 	unsigned long val, void *v, int nr_to_call, int *nr_calls); | 
 | 145 | extern int raw_notifier_call_chain(struct raw_notifier_head *nh, | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 146 | 		unsigned long val, void *v); | 
| Gautham R Shenoy | 6f7cc11 | 2007-05-09 02:34:02 -0700 | [diff] [blame] | 147 | extern int __raw_notifier_call_chain(struct raw_notifier_head *nh, | 
 | 148 | 	unsigned long val, void *v, int nr_to_call, int *nr_calls); | 
 | 149 | extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh, | 
| Alan Stern | eabc069 | 2006-10-04 02:17:04 -0700 | [diff] [blame] | 150 | 		unsigned long val, void *v); | 
| Gautham R Shenoy | 6f7cc11 | 2007-05-09 02:34:02 -0700 | [diff] [blame] | 151 | extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, | 
 | 152 | 	unsigned long val, void *v, int nr_to_call, int *nr_calls); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 153 |  | 
 | 154 | #define NOTIFY_DONE		0x0000		/* Don't care */ | 
 | 155 | #define NOTIFY_OK		0x0001		/* Suits me */ | 
 | 156 | #define NOTIFY_STOP_MASK	0x8000		/* Don't call further */ | 
| Alan Stern | e041c68 | 2006-03-27 01:16:30 -0800 | [diff] [blame] | 157 | #define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002) | 
 | 158 | 						/* Bad/Veto action */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 159 | /* | 
 | 160 |  * Clean way to return from the notifier and stop further calls. | 
 | 161 |  */ | 
 | 162 | #define NOTIFY_STOP		(NOTIFY_OK|NOTIFY_STOP_MASK) | 
 | 163 |  | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 164 | /* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */ | 
 | 165 | static inline int notifier_from_errno(int err) | 
 | 166 | { | 
 | 167 | 	return NOTIFY_STOP_MASK | (NOTIFY_OK - err); | 
 | 168 | } | 
 | 169 |  | 
 | 170 | /* Restore (negative) errno value from notify return value. */ | 
 | 171 | static inline int notifier_to_errno(int ret) | 
 | 172 | { | 
 | 173 | 	ret &= ~NOTIFY_STOP_MASK; | 
 | 174 | 	return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0; | 
 | 175 | } | 
 | 176 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 177 | /* | 
 | 178 |  *	Declared notifiers so far. I can imagine quite a few more chains | 
 | 179 |  *	over time (eg laptop power reset chains, reboot chain (to clean  | 
 | 180 |  *	device units up), device [un]mount chain, module load/unload chain, | 
 | 181 |  *	low memory chain, screenblank chain (for plug in modular screenblankers)  | 
 | 182 |  *	VC switch chains (for loadable kernel svgalib VC switch helpers) etc... | 
 | 183 |  */ | 
 | 184 |   | 
 | 185 | /* netdevice notifier chain */ | 
 | 186 | #define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */ | 
 | 187 | #define NETDEV_DOWN	0x0002 | 
 | 188 | #define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface | 
 | 189 | 				   detected a hardware crash and restarted | 
 | 190 | 				   - we can use this eg to kick tcp sessions | 
 | 191 | 				   once done */ | 
 | 192 | #define NETDEV_CHANGE	0x0004	/* Notify device state change */ | 
 | 193 | #define NETDEV_REGISTER 0x0005 | 
 | 194 | #define NETDEV_UNREGISTER	0x0006 | 
 | 195 | #define NETDEV_CHANGEMTU	0x0007 | 
 | 196 | #define NETDEV_CHANGEADDR	0x0008 | 
 | 197 | #define NETDEV_GOING_DOWN	0x0009 | 
 | 198 | #define NETDEV_CHANGENAME	0x000A | 
| Stephen Hemminger | d8a33ac | 2005-05-29 14:13:47 -0700 | [diff] [blame] | 199 | #define NETDEV_FEAT_CHANGE	0x000B | 
| Or Gerlitz | c1da4ac | 2008-06-13 18:12:00 -0700 | [diff] [blame] | 200 | #define NETDEV_BONDING_FAILOVER 0x000C | 
| Johannes Berg | 3b8bcfd | 2009-05-30 01:39:53 +0200 | [diff] [blame] | 201 | #define NETDEV_PRE_UP		0x000D | 
| Moni Shoua | 75c7850 | 2009-09-15 02:37:40 -0700 | [diff] [blame] | 202 | #define NETDEV_BONDING_OLDTYPE  0x000E | 
 | 203 | #define NETDEV_BONDING_NEWTYPE  0x000F | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 204 |  | 
 | 205 | #define SYS_DOWN	0x0001	/* Notify of system down */ | 
 | 206 | #define SYS_RESTART	SYS_DOWN | 
 | 207 | #define SYS_HALT	0x0002	/* Notify of system halt */ | 
 | 208 | #define SYS_POWER_OFF	0x0003	/* Notify of system power off */ | 
 | 209 |  | 
 | 210 | #define NETLINK_URELEASE	0x0001	/* Unicast netlink socket released */ | 
 | 211 |  | 
 | 212 | #define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */ | 
 | 213 | #define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */ | 
 | 214 | #define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */ | 
 | 215 | #define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */ | 
 | 216 | #define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */ | 
 | 217 | #define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */ | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 218 | #define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task, | 
| Manfred Spraul | e545a61 | 2008-09-07 16:57:22 +0200 | [diff] [blame] | 219 | 					* not handling interrupts, soon dead. | 
 | 220 | 					* Called on the dying cpu, interrupts | 
 | 221 | 					* are already disabled. Must not | 
 | 222 | 					* sleep, must not fail */ | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 223 | #define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug | 
 | 224 | 					* lock is dropped */ | 
| Manfred Spraul | e545a61 | 2008-09-07 16:57:22 +0200 | [diff] [blame] | 225 | #define CPU_STARTING		0x000A /* CPU (unsigned)v soon running. | 
 | 226 | 					* Called on the new cpu, just before | 
 | 227 | 					* enabling interrupts. Must not sleep, | 
 | 228 | 					* must not fail */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 229 |  | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 230 | /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend | 
 | 231 |  * operation in progress | 
 | 232 |  */ | 
 | 233 | #define CPU_TASKS_FROZEN	0x0010 | 
 | 234 |  | 
 | 235 | #define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN) | 
 | 236 | #define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN) | 
 | 237 | #define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN) | 
 | 238 | #define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN) | 
 | 239 | #define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN) | 
 | 240 | #define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN) | 
| Avi Kivity | db912f9 | 2007-05-24 12:23:10 +0300 | [diff] [blame] | 241 | #define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN) | 
| Manfred Spraul | e545a61 | 2008-09-07 16:57:22 +0200 | [diff] [blame] | 242 | #define CPU_STARTING_FROZEN	(CPU_STARTING | CPU_TASKS_FROZEN) | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 243 |  | 
| Rafael J. Wysocki | b10d911 | 2007-07-19 01:47:36 -0700 | [diff] [blame] | 244 | /* Hibernation and suspend events */ | 
 | 245 | #define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */ | 
 | 246 | #define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */ | 
 | 247 | #define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */ | 
 | 248 | #define PM_POST_SUSPEND		0x0004 /* Suspend finished */ | 
| Alan Stern | c3e94d8 | 2007-11-19 23:38:25 +0100 | [diff] [blame] | 249 | #define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */ | 
 | 250 | #define PM_POST_RESTORE		0x0006 /* Restore failed */ | 
| Rafael J. Wysocki | b10d911 | 2007-07-19 01:47:36 -0700 | [diff] [blame] | 251 |  | 
| Samuel Thibault | 41ab439 | 2007-10-18 23:39:12 -0700 | [diff] [blame] | 252 | /* Console keyboard events. | 
 | 253 |  * Note: KBD_KEYCODE is always sent before KBD_UNBOUND_KEYCODE, KBD_UNICODE and | 
 | 254 |  * KBD_KEYSYM. */ | 
 | 255 | #define KBD_KEYCODE		0x0001 /* Keyboard keycode, called before any other */ | 
 | 256 | #define KBD_UNBOUND_KEYCODE	0x0002 /* Keyboard keycode which is not bound to any other */ | 
 | 257 | #define KBD_UNICODE		0x0003 /* Keyboard unicode */ | 
 | 258 | #define KBD_KEYSYM		0x0004 /* Keyboard keysym */ | 
 | 259 | #define KBD_POST_KEYSYM		0x0005 /* Called after keyboard keysym interpretation */ | 
 | 260 |  | 
| Alexey Dobriyan | fe9d4f5 | 2007-10-18 23:39:16 -0700 | [diff] [blame] | 261 | extern struct blocking_notifier_head reboot_notifier_list; | 
 | 262 |  | 
| Samuel Thibault | b293d75 | 2007-10-18 23:39:17 -0700 | [diff] [blame] | 263 | /* Virtual Terminal events. */ | 
 | 264 | #define VT_ALLOCATE		0x0001 /* Console got allocated */ | 
 | 265 | #define VT_DEALLOCATE		0x0002 /* Console will be deallocated */ | 
 | 266 | #define VT_WRITE		0x0003 /* A char got output */ | 
 | 267 | #define VT_UPDATE		0x0004 /* A bigger update occurred */ | 
| Karl Dahlke | 0341a4d | 2008-04-28 02:14:25 -0700 | [diff] [blame] | 268 | #define VT_PREWRITE		0x0005 /* A char is about to be written to the console */ | 
| Samuel Thibault | b293d75 | 2007-10-18 23:39:17 -0700 | [diff] [blame] | 269 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 270 | #endif /* __KERNEL__ */ | 
 | 271 | #endif /* _LINUX_NOTIFIER_H */ |