#ifndef _LINUX_IRQ_H
#define _LINUX_IRQ_H

/*
 * Please do not include this file in generic code.  There is currently
 * no requirement for any architecture to implement anything held
 * within this file.
 *
 * Thanks. --rmk
 */

#include <linux/smp.h>

#ifndef CONFIG_S390

#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/wait.h>

#include <asm/irq.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>

struct seq_file;
struct module;
struct irq_desc;
struct irq_data;
typedef	void (*irq_flow_handler_t)(unsigned int irq,
					    struct irq_desc *desc);
typedef	void (*irq_preflow_handler_t)(struct irq_data *data);

/*
 * IRQ line status.
 *
 * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h
 *
 * IRQ_TYPE_NONE		- default, unspecified type
 * IRQ_TYPE_EDGE_RISING		- rising edge triggered
 * IRQ_TYPE_EDGE_FALLING	- falling edge triggered
 * IRQ_TYPE_EDGE_BOTH		- rising and falling edge triggered
 * IRQ_TYPE_LEVEL_HIGH		- high level triggered
 * IRQ_TYPE_LEVEL_LOW		- low level triggered
 * IRQ_TYPE_LEVEL_MASK		- Mask to filter out the level bits
 * IRQ_TYPE_SENSE_MASK		- Mask for all the above bits
 * IRQ_TYPE_PROBE		- Special flag for probing in progress
 *
 * Bits which can be modified via irq_set/clear/modify_status_flags()
 * IRQ_LEVEL			- Interrupt is level type. Will be also
 *				  updated in the code when the above trigger
 *				  bits are modified via irq_set_irq_type()
 * IRQ_PER_CPU			- Mark an interrupt PER_CPU. Will protect
 *				  it from affinity setting
 * IRQ_NOPROBE			- Interrupt cannot be probed by autoprobing
 * IRQ_NOREQUEST		- Interrupt cannot be requested via
 *				  request_irq()
 * IRQ_NOTHREAD			- Interrupt cannot be threaded
 * IRQ_NOAUTOEN			- Interrupt is not automatically enabled in
 *				  request/setup_irq()
 * IRQ_NO_BALANCING		- Interrupt cannot be balanced (affinity set)
 * IRQ_MOVE_PCNTXT		- Interrupt can be migrated from process context
 * IRQ_NESTED_THREAD		- Interrupt nests into another thread
 * IRQ_PER_CPU_DEVID		- Dev_id is a per-cpu variable
 */
enum {
	IRQ_TYPE_NONE		= 0x00000000,
	IRQ_TYPE_EDGE_RISING	= 0x00000001,
	IRQ_TYPE_EDGE_FALLING	= 0x00000002,
	IRQ_TYPE_EDGE_BOTH	= (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
	IRQ_TYPE_LEVEL_HIGH	= 0x00000004,
	IRQ_TYPE_LEVEL_LOW	= 0x00000008,
	IRQ_TYPE_LEVEL_MASK	= (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
	IRQ_TYPE_SENSE_MASK	= 0x0000000f,

	IRQ_TYPE_PROBE		= 0x00000010,

	IRQ_LEVEL		= (1 <<  8),
	IRQ_PER_CPU		= (1 <<  9),
	IRQ_NOPROBE		= (1 << 10),
	IRQ_NOREQUEST		= (1 << 11),
	IRQ_NOAUTOEN		= (1 << 12),
	IRQ_NO_BALANCING	= (1 << 13),
	IRQ_MOVE_PCNTXT		= (1 << 14),
	IRQ_NESTED_THREAD	= (1 << 15),
	IRQ_NOTHREAD		= (1 << 16),
	IRQ_PER_CPU_DEVID	= (1 << 17),
};

#define IRQF_MODIFY_MASK	\
	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)

#define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)

/*
 * Return value for chip->irq_set_affinity()
 *
 * IRQ_SET_MASK_OK	- OK, core updates irq_data.affinity
 * IRQ_SET_MASK_OK_NOCOPY	- OK, chip did update irq_data.affinity
 */
enum {
	IRQ_SET_MASK_OK = 0,
	IRQ_SET_MASK_OK_NOCOPY,
};
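
/*
 * Illustrative sketch (not part of the kernel API): how a chip's
 * irq_set_affinity() callback might use these return values. The "foo"
 * hardware and its foo_route_irq() register helper are hypothetical.
 */
#if 0	/* example only, not compiled */
static int foo_irq_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	/* Route the line to the first cpu in the requested mask */
	foo_route_irq(data->irq, cpumask_first(dest));

	/* Let the core copy 'dest' into irq_data.affinity for us */
	return IRQ_SET_MASK_OK;
}
#endif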
 | 112 |  | 
| Eric W. Biederman | 5b912c1 | 2007-01-28 12:52:03 -0700 | [diff] [blame] | 113 | struct msi_desc; | 
| Grant Likely | 08a543a | 2011-07-26 03:19:06 -0600 | [diff] [blame] | 114 | struct irq_domain; | 
| Thomas Gleixner | 6a6de9e | 2006-06-29 02:24:51 -0700 | [diff] [blame] | 115 |  | 
| Ingo Molnar | 8fee5c3 | 2006-06-29 02:24:45 -0700 | [diff] [blame] | 116 | /** | 
| Thomas Gleixner | ff7dcd4 | 2010-09-27 12:44:25 +0000 | [diff] [blame] | 117 |  * struct irq_data - per irq and irq chip data passed down to chip functions | 
 | 118 |  * @irq:		interrupt number | 
| Grant Likely | 08a543a | 2011-07-26 03:19:06 -0600 | [diff] [blame] | 119 |  * @hwirq:		hardware interrupt number, local to the interrupt domain | 
| Thomas Gleixner | ff7dcd4 | 2010-09-27 12:44:25 +0000 | [diff] [blame] | 120 |  * @node:		node index useful for balancing | 
| Randy Dunlap | 30398bf6 | 2011-03-18 09:33:56 -0700 | [diff] [blame] | 121 |  * @state_use_accessors: status information for irq chip functions. | 
| Thomas Gleixner | 91c4991 | 2011-02-03 20:48:29 +0100 | [diff] [blame] | 122 |  *			Use accessor functions to deal with it | 
| Thomas Gleixner | ff7dcd4 | 2010-09-27 12:44:25 +0000 | [diff] [blame] | 123 |  * @chip:		low level interrupt hardware access | 
| Grant Likely | 08a543a | 2011-07-26 03:19:06 -0600 | [diff] [blame] | 124 |  * @domain:		Interrupt translation domain; responsible for mapping | 
 | 125 |  *			between hwirq number and linux irq number. | 
| Thomas Gleixner | ff7dcd4 | 2010-09-27 12:44:25 +0000 | [diff] [blame] | 126 |  * @handler_data:	per-IRQ data for the irq_chip methods | 
 | 127 |  * @chip_data:		platform-specific per-chip private data for the chip | 
 | 128 |  *			methods, to allow shared chip implementations | 
 | 129 |  * @msi_desc:		MSI descriptor | 
 | 130 |  * @affinity:		IRQ affinity on SMP | 
| Thomas Gleixner | ff7dcd4 | 2010-09-27 12:44:25 +0000 | [diff] [blame] | 131 |  * | 
 | 132 |  * The fields here need to overlay the ones in irq_desc until we | 
 | 133 |  * cleaned up the direct references and switched everything over to | 
 | 134 |  * irq_data. | 
 | 135 |  */ | 
 | 136 | struct irq_data { | 
 | 137 | 	unsigned int		irq; | 
| Grant Likely | 08a543a | 2011-07-26 03:19:06 -0600 | [diff] [blame] | 138 | 	unsigned long		hwirq; | 
| Thomas Gleixner | ff7dcd4 | 2010-09-27 12:44:25 +0000 | [diff] [blame] | 139 | 	unsigned int		node; | 
| Thomas Gleixner | 91c4991 | 2011-02-03 20:48:29 +0100 | [diff] [blame] | 140 | 	unsigned int		state_use_accessors; | 
| Thomas Gleixner | ff7dcd4 | 2010-09-27 12:44:25 +0000 | [diff] [blame] | 141 | 	struct irq_chip		*chip; | 
| Grant Likely | 08a543a | 2011-07-26 03:19:06 -0600 | [diff] [blame] | 142 | 	struct irq_domain	*domain; | 
| Thomas Gleixner | ff7dcd4 | 2010-09-27 12:44:25 +0000 | [diff] [blame] | 143 | 	void			*handler_data; | 
 | 144 | 	void			*chip_data; | 
 | 145 | 	struct msi_desc		*msi_desc; | 
 | 146 | #ifdef CONFIG_SMP | 
 | 147 | 	cpumask_var_t		affinity; | 
 | 148 | #endif | 
| Thomas Gleixner | ff7dcd4 | 2010-09-27 12:44:25 +0000 | [diff] [blame] | 149 | }; | 
 | 150 |  | 
| Thomas Gleixner | f230b6d | 2011-02-05 15:20:04 +0100 | [diff] [blame] | 151 | /* | 
 | 152 |  * Bit masks for irq_data.state | 
 | 153 |  * | 
| Thomas Gleixner | 876dbd4 | 2011-02-08 17:28:12 +0100 | [diff] [blame] | 154 |  * IRQD_TRIGGER_MASK		- Mask for the trigger type bits | 
| Thomas Gleixner | f230b6d | 2011-02-05 15:20:04 +0100 | [diff] [blame] | 155 |  * IRQD_SETAFFINITY_PENDING	- Affinity setting is pending | 
| Thomas Gleixner | a005677 | 2011-02-08 17:11:03 +0100 | [diff] [blame] | 156 |  * IRQD_NO_BALANCING		- Balancing disabled for this IRQ | 
 | 157 |  * IRQD_PER_CPU			- Interrupt is per cpu | 
| Thomas Gleixner | 2bdd105 | 2011-02-08 17:22:00 +0100 | [diff] [blame] | 158 |  * IRQD_AFFINITY_SET		- Interrupt affinity was set | 
| Thomas Gleixner | 876dbd4 | 2011-02-08 17:28:12 +0100 | [diff] [blame] | 159 |  * IRQD_LEVEL			- Interrupt is level triggered | 
| Thomas Gleixner | 7f94226 | 2011-02-10 19:46:26 +0100 | [diff] [blame] | 160 |  * IRQD_WAKEUP_STATE		- Interrupt is configured for wakeup | 
 | 161 |  *				  from suspend | 
 * IRQD_MOVE_PCNTXT		- Interrupt can be moved in process
 *				  context
 * IRQD_IRQ_DISABLED		- Disabled state of the interrupt
 * IRQD_IRQ_MASKED		- Masked state of the interrupt
 * IRQD_IRQ_INPROGRESS		- In progress state of the interrupt
 */
enum {
	IRQD_TRIGGER_MASK		= 0xf,
	IRQD_SETAFFINITY_PENDING	= (1 <<  8),
	IRQD_NO_BALANCING		= (1 << 10),
	IRQD_PER_CPU			= (1 << 11),
	IRQD_AFFINITY_SET		= (1 << 12),
	IRQD_LEVEL			= (1 << 13),
	IRQD_WAKEUP_STATE		= (1 << 14),
	IRQD_MOVE_PCNTXT		= (1 << 15),
	IRQD_IRQ_DISABLED		= (1 << 16),
	IRQD_IRQ_MASKED			= (1 << 17),
	IRQD_IRQ_INPROGRESS		= (1 << 18),
};

static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
}

static inline bool irqd_is_per_cpu(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_PER_CPU;
}

static inline bool irqd_can_balance(struct irq_data *d)
{
	return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}

static inline bool irqd_affinity_was_set(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_AFFINITY_SET;
}

static inline void irqd_mark_affinity_was_set(struct irq_data *d)
{
	d->state_use_accessors |= IRQD_AFFINITY_SET;
}

static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_TRIGGER_MASK;
}

/*
 * Must only be called inside irq_chip.irq_set_type() functions.
 */
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
	d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
	d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
}
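
/*
 * Illustrative sketch (hypothetical "foo" hardware): an
 * irq_chip.irq_set_type() callback is the one place where
 * irqd_set_trigger_type() may be called directly. The foo_hw_set_edge()
 * register helper is an assumption, not kernel API.
 */
#if 0	/* example only, not compiled */
static int foo_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	if (flow_type & ~IRQ_TYPE_EDGE_BOTH)
		return -EINVAL;	/* this controller is edge only */

	foo_hw_set_edge(d->hwirq, flow_type);
	/* Record the new trigger bits in irq_data state */
	irqd_set_trigger_type(d, flow_type);
	return 0;
}
#endif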

static inline bool irqd_is_level_type(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_LEVEL;
}

static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_WAKEUP_STATE;
}

static inline bool irqd_can_move_in_process_context(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_MOVE_PCNTXT;
}

static inline bool irqd_irq_disabled(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_IRQ_DISABLED;
}

static inline bool irqd_irq_masked(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_IRQ_MASKED;
}

static inline bool irqd_irq_inprogress(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
}

/*
 * Functions for chained handlers which can be enabled/disabled by the
 * standard disable_irq/enable_irq calls. Must be called with
 * irq_desc->lock held.
 */
static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
{
	d->state_use_accessors |= IRQD_IRQ_INPROGRESS;
}

static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
{
	d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
}

/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @name:		name for /proc/interrupts
 * @irq_startup:	start up the interrupt (defaults to ->enable if NULL)
 * @irq_shutdown:	shut down the interrupt (defaults to ->disable if NULL)
 * @irq_enable:		enable the interrupt (defaults to chip->unmask if NULL)
 * @irq_disable:	disable the interrupt
 * @irq_ack:		start of a new interrupt
 * @irq_mask:		mask an interrupt source
 * @irq_mask_ack:	ack and mask an interrupt source
 * @irq_unmask:		unmask an interrupt source
 * @irq_eoi:		end of interrupt
 * @irq_set_affinity:	set the CPU affinity on SMP machines
 * @irq_retrigger:	resend an IRQ to the CPU
 * @irq_set_type:	set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
 * @irq_bus_lock:	function to lock access to slow bus (i2c) chips
 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
 * @irq_cpu_online:	configure an interrupt source for a secondary CPU
 * @irq_cpu_offline:	un-configure an interrupt source for a secondary CPU
 * @irq_suspend:	function called from core code on suspend once per chip
 * @irq_resume:		function called from core code on resume once per chip
 * @irq_pm_shutdown:	function called from core code on shutdown once per chip
 * @irq_print_chip:	optional to print special chip info in show_interrupts
 * @flags:		chip specific flags
 *
 * @release:		release function solely used by UML
 */
struct irq_chip {
	const char	*name;
	unsigned int	(*irq_startup)(struct irq_data *data);
	void		(*irq_shutdown)(struct irq_data *data);
	void		(*irq_enable)(struct irq_data *data);
	void		(*irq_disable)(struct irq_data *data);

	void		(*irq_ack)(struct irq_data *data);
	void		(*irq_mask)(struct irq_data *data);
	void		(*irq_mask_ack)(struct irq_data *data);
	void		(*irq_unmask)(struct irq_data *data);
	void		(*irq_eoi)(struct irq_data *data);

	int		(*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
	int		(*irq_retrigger)(struct irq_data *data);
	int		(*irq_set_type)(struct irq_data *data, unsigned int flow_type);
	int		(*irq_set_wake)(struct irq_data *data, unsigned int on);

	void		(*irq_bus_lock)(struct irq_data *data);
	void		(*irq_bus_sync_unlock)(struct irq_data *data);

	void		(*irq_cpu_online)(struct irq_data *data);
	void		(*irq_cpu_offline)(struct irq_data *data);

	void		(*irq_suspend)(struct irq_data *data);
	void		(*irq_resume)(struct irq_data *data);
	void		(*irq_pm_shutdown)(struct irq_data *data);

	void		(*irq_print_chip)(struct irq_data *data, struct seq_file *p);

	unsigned long	flags;

	/* Currently used only by UML, might disappear one day. */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	void		(*release)(unsigned int irq, void *dev_id);
#endif
};
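
/*
 * Illustrative sketch: a minimal irq_chip instance for a hypothetical
 * "foo" controller. The foo_ack()/foo_mask()/foo_unmask() register
 * helpers are assumptions; callbacks that are not needed may stay NULL.
 */
#if 0	/* example only, not compiled */
static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	.irq_ack	= foo_ack,
	.irq_mask	= foo_mask,
	.irq_unmask	= foo_unmask,
};
#endif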

/*
 * irq_chip specific flags
 *
 * IRQCHIP_SET_TYPE_MASKED:	Mask before calling chip.irq_set_type()
 * IRQCHIP_EOI_IF_HANDLED:	Only issue irq_eoi() when irq was handled
 * IRQCHIP_MASK_ON_SUSPEND:	Mask non wake irqs in the suspend path
 * IRQCHIP_ONOFFLINE_ENABLED:	Only call irq_on/off_line callbacks
 *				when irq enabled
 * IRQCHIP_SKIP_SET_WAKE:	Skip chip.irq_set_wake(), for this irq chip
 */
enum {
	IRQCHIP_SET_TYPE_MASKED		= (1 <<  0),
	IRQCHIP_EOI_IF_HANDLED		= (1 <<  1),
	IRQCHIP_MASK_ON_SUSPEND		= (1 <<  2),
	IRQCHIP_ONOFFLINE_ENABLED	= (1 <<  3),
	IRQCHIP_SKIP_SET_WAKE		= (1 <<  4),
};

/* This include will go away once we isolated irq_desc usage to core code */
#include <linux/irqdesc.h>

/*
 * Pick up the arch-dependent methods:
 */
#include <asm/hw_irq.h>

#ifndef NR_IRQS_LEGACY
# define NR_IRQS_LEGACY 0
#endif

#ifndef ARCH_IRQ_INIT_FLAGS
# define ARCH_IRQ_INIT_FLAGS	0
#endif

#define IRQ_DEFAULT_INIT_FLAGS	ARCH_IRQ_INIT_FLAGS

struct irqaction;
extern int setup_irq(unsigned int irq, struct irqaction *new);
extern void remove_irq(unsigned int irq, struct irqaction *act);
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);

extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);

#ifdef CONFIG_GENERIC_HARDIRQS

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void irq_move_irq(struct irq_data *data);
void irq_move_masked_irq(struct irq_data *data);
#else
static inline void irq_move_irq(struct irq_data *data) { }
static inline void irq_move_masked_irq(struct irq_data *data) { }
#endif

extern int no_irq_affinity;

/*
 * Built-in IRQ handlers for various IRQ types,
 * callable via desc->handle_irq()
 */
extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);

/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
			   irqreturn_t action_ret);

/* Enable/disable irq debugging output: */
extern int noirqdebug_setup(char *str);

/* Checks whether the interrupt can be requested by request_irq(): */
extern int can_request_irq(unsigned int irq, unsigned long irqflags);

/* Dummy irq-chip implementations: */
extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;

extern void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name);

static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
					    irq_flow_handler_t handle)
{
	irq_set_chip_and_handler_name(irq, chip, handle, NULL);
}

extern int irq_set_percpu_devid(unsigned int irq);

extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name);

static inline void
irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 0, NULL);
}

/*
 * Set a highlevel chained flow handler for a given IRQ.
 * (a chained handler is automatically enabled and set to
 *  IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
 */
static inline void
irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 1, NULL);
}
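
/*
 * Illustrative sketch: wiring up a chained demux handler for a
 * hypothetical GPIO bank whose upstream line is "parent_irq". The
 * struct foo_bank, FOO_PENDING register offset and irq_base layout
 * are assumptions for the example.
 */
#if 0	/* example only, not compiled */
static void foo_gpio_demux(unsigned int irq, struct irq_desc *desc)
{
	struct foo_bank *bank = irq_get_handler_data(irq);
	u32 pending = readl(bank->base + FOO_PENDING);

	while (pending) {
		unsigned int bit = __ffs(pending);

		generic_handle_irq(bank->irq_base + bit);
		pending &= ~(1 << bit);
	}
}

static void foo_gpio_init(struct foo_bank *bank, unsigned int parent_irq)
{
	irq_set_handler_data(parent_irq, bank);
	irq_set_chained_handler(parent_irq, foo_gpio_demux);
}
#endif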

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);

static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
{
	irq_modify_status(irq, 0, set);
}

static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
{
	irq_modify_status(irq, clr, 0);
}

static inline void irq_set_noprobe(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOPROBE);
}

static inline void irq_set_probe(unsigned int irq)
{
	irq_modify_status(irq, IRQ_NOPROBE, 0);
}

static inline void irq_set_nothread(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOTHREAD);
}

static inline void irq_set_thread(unsigned int irq)
{
	irq_modify_status(irq, IRQ_NOTHREAD, 0);
}

static inline void irq_set_nested_thread(unsigned int irq, bool nest)
{
	if (nest)
		irq_set_status_flags(irq, IRQ_NESTED_THREAD);
	else
		irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
}

static inline void irq_set_percpu_devid_flags(unsigned int irq)
{
	irq_set_status_flags(irq,
			     IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
			     IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
}
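
/*
 * Illustrative sketch: a typical setup sequence for a per-cpu interrupt
 * such as a local timer. request_percpu_irq() lives in linux/interrupt.h
 * and DEFINE_PER_CPU in linux/percpu.h; the "foo" names and handler are
 * assumptions for the example.
 */
#if 0	/* example only, not compiled */
static DEFINE_PER_CPU(struct foo_timer, foo_timer_evt);

static int foo_timer_setup(unsigned int irq)
{
	irq_set_percpu_devid(irq);	/* applies the flags above */
	return request_percpu_irq(irq, foo_timer_handler, "foo_timer",
				  &foo_timer_evt);
}
#endif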

/* Handle dynamic irq creation and destruction */
extern unsigned int create_irq_nr(unsigned int irq_want, int node);
extern int create_irq(void);
extern void destroy_irq(unsigned int irq);

/*
 * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and
 * irq_free_desc instead.
 */
extern void dynamic_irq_cleanup(unsigned int irq);
static inline void dynamic_irq_init(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

/* Set/get chip/data for an IRQ: */
extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
extern int irq_set_handler_data(unsigned int irq, void *data);
extern int irq_set_chip_data(unsigned int irq, void *data);
extern int irq_set_irq_type(unsigned int irq, unsigned int type);
extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
extern struct irq_data *irq_get_irq_data(unsigned int irq);

static inline struct irq_chip *irq_get_chip(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->chip : NULL;
}

static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
{
	return d->chip;
}

static inline void *irq_get_chip_data(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->chip_data : NULL;
}

static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
{
	return d->chip_data;
}

static inline void *irq_get_handler_data(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->handler_data : NULL;
}

static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
{
	return d->handler_data;
}

static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->msi_desc : NULL;
}

static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
{
	return d->msi_desc;
}

int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		struct module *owner);

/* use macros to avoid needing export.h for THIS_MODULE */
#define irq_alloc_descs(irq, from, cnt, node)	\
	__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE)

#define irq_alloc_desc(node)			\
	irq_alloc_descs(-1, 0, 1, node)

#define irq_alloc_desc_at(at, node)		\
	irq_alloc_descs(at, at, 1, node)

#define irq_alloc_desc_from(from, node)		\
	irq_alloc_descs(-1, from, 1, node)
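
/*
 * Illustrative sketch: allocating a linear range of descriptors for a
 * hypothetical controller with 32 hardware irqs, letting the core pick
 * the base number (irq = -1). The foo_setup_irqs() helper and the
 * search base of 64 are assumptions for the example.
 */
#if 0	/* example only, not compiled */
static int foo_probe(void)
{
	int irq_base;

	/* 32 consecutive descs, first free number >= 64, default node */
	irq_base = irq_alloc_descs(-1, 64, 32, numa_node_id());
	if (irq_base < 0)
		return irq_base;

	return foo_setup_irqs(irq_base);
}
#endif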

void irq_free_descs(unsigned int irq, unsigned int cnt);
int irq_reserve_irqs(unsigned int from, unsigned int cnt);

static inline void irq_free_desc(unsigned int irq)
{
	irq_free_descs(irq, 1);
}

static inline int irq_reserve_irq(unsigned int irq)
{
	return irq_reserve_irqs(irq, 1);
}

#ifndef irq_reg_writel
# define irq_reg_writel(val, addr)	writel(val, addr)
#endif
#ifndef irq_reg_readl
# define irq_reg_readl(addr)		readl(addr)
#endif

/**
 * struct irq_chip_regs - register offsets for struct irq_chip_generic
 * @enable:	Enable register offset to reg_base
 * @disable:	Disable register offset to reg_base
 * @mask:	Mask register offset to reg_base
 * @ack:	Ack register offset to reg_base
 * @eoi:	Eoi register offset to reg_base
 * @type:	Type configuration register offset to reg_base
 * @polarity:	Polarity configuration register offset to reg_base
 */
struct irq_chip_regs {
	unsigned long		enable;
	unsigned long		disable;
	unsigned long		mask;
	unsigned long		ack;
	unsigned long		eoi;
	unsigned long		type;
	unsigned long		polarity;
};

/**
 * struct irq_chip_type - Generic interrupt chip instance for a flow type
 * @chip:		The real interrupt chip which provides the callbacks
 * @regs:		Register offsets for this chip
 * @handler:		Flow handler associated with this chip
 * @type:		Chip can handle these flow types
 *
 * An irq_chip_generic can have several instances of irq_chip_type when
 * it requires different functions and register offsets for different
 * flow types.
 */
struct irq_chip_type {
	struct irq_chip		chip;
	struct irq_chip_regs	regs;
	irq_flow_handler_t	handler;
	u32			type;
};

/**
 * struct irq_chip_generic - Generic irq chip data structure
 * @lock:		Lock to protect register and cache data access
 * @reg_base:		Register base address (virtual)
 * @irq_base:		Interrupt base nr for this chip
 * @irq_cnt:		Number of interrupts handled by this chip
 * @mask_cache:		Cached mask register
 * @type_cache:		Cached type register
 * @polarity_cache:	Cached polarity register
 * @wake_enabled:	Interrupt can wakeup from suspend
 * @wake_active:	Interrupt is marked as a wakeup from suspend source
 * @num_ct:		Number of available irq_chip_type instances (usually 1)
 * @private:		Private data for non generic chip callbacks
 * @list:		List head for keeping track of instances
 * @chip_types:		Array of interrupt irq_chip_types
 *
 * Note, that irq_chip_generic can have multiple irq_chip_type
 * implementations which can be associated to a particular irq line of
 * an irq_chip_generic instance. That allows to share and protect
 * state in an irq_chip_generic instance when we need to implement
 * different flow mechanisms (level/edge) for it.
 */
struct irq_chip_generic {
	raw_spinlock_t		lock;
	void __iomem		*reg_base;
	unsigned int		irq_base;
	unsigned int		irq_cnt;
	u32			mask_cache;
	u32			type_cache;
	u32			polarity_cache;
	u32			wake_enabled;
	u32			wake_active;
	unsigned int		num_ct;
	void			*private;
	struct list_head	list;
	struct irq_chip_type	chip_types[0];
};

/**
 * enum irq_gc_flags - Initialization flags for generic irq chips
 * @IRQ_GC_INIT_MASK_CACHE:	Initialize the mask_cache by reading mask reg
 * @IRQ_GC_INIT_NESTED_LOCK:	Set the lock class of the irqs to nested for
 *				irq chips which need to call irq_set_wake() on
 *				the parent irq. Usually GPIO implementations
 */
enum irq_gc_flags {
	IRQ_GC_INIT_MASK_CACHE		= 1 << 0,
	IRQ_GC_INIT_NESTED_LOCK		= 1 << 1,
};

/* Generic chip callback functions */
void irq_gc_noop(struct irq_data *d);
void irq_gc_mask_disable_reg(struct irq_data *d);
void irq_gc_mask_set_bit(struct irq_data *d);
void irq_gc_mask_clr_bit(struct irq_data *d);
void irq_gc_unmask_enable_reg(struct irq_data *d);
void irq_gc_ack_set_bit(struct irq_data *d);
void irq_gc_ack_clr_bit(struct irq_data *d);
void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
void irq_gc_eoi(struct irq_data *d);
int irq_gc_set_wake(struct irq_data *d, unsigned int on);

/* Setup functions for irq_chip_generic */
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
		       void __iomem *reg_base, irq_flow_handler_t handler);
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
			    enum irq_gc_flags flags, unsigned int clr,
			    unsigned int set);
int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
			     unsigned int clr, unsigned int set);

static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
{
	return container_of(d->chip, struct irq_chip_type, chip);
}

#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
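
/*
 * Illustrative sketch: registering one generic chip for a hypothetical
 * memory-mapped controller with 32 level irqs, a mask register and an
 * ack register. The base address, register offsets and irq numbers are
 * assumptions for the example.
 */
#if 0	/* example only, not compiled */
static void foo_init_irq(void __iomem *base, unsigned int irq_base)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("FOO", 1, irq_base, base,
				    handle_level_irq);
	if (!gc)
		return;

	ct = gc->chip_types;
	ct->chip.irq_mask = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_ack = irq_gc_ack_set_bit;
	ct->regs.mask = 0x04;	/* hypothetical register offsets */
	ct->regs.ack = 0x08;

	/* All 32 irqs, prime mask_cache from hw, make them requestable */
	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST, 0);
}
#endif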

#ifdef CONFIG_SMP
static inline void irq_gc_lock(struct irq_chip_generic *gc)
{
	raw_spin_lock(&gc->lock);
}

static inline void irq_gc_unlock(struct irq_chip_generic *gc)
{
	raw_spin_unlock(&gc->lock);
}
#else
static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
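
/*
 * Illustrative sketch: a custom callback for a generic chip must take
 * gc->lock around register/cache updates. This mirrors the stock
 * irq_gc_mask_set_bit() helper and is shown only to illustrate the
 * locking rule.
 */
#if 0	/* example only, not compiled */
static void foo_gc_mask(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	gc->mask_cache |= mask;
	irq_reg_writel(gc->mask_cache, gc->reg_base + ct->regs.mask);
	irq_gc_unlock(gc);
}
#endif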

#endif /* CONFIG_GENERIC_HARDIRQS */

#endif /* !CONFIG_S390 */

#endif /* _LINUX_IRQ_H */