#ifndef _LINUX_IRQ_H
#define _LINUX_IRQ_H

/*
 * Please do not include this file in generic code.  There is currently
 * no requirement for any architecture to implement anything held
 * within this file.
 *
 * Thanks. --rmk
 */

#include <linux/smp.h>

#ifndef CONFIG_S390

#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/wait.h>

#include <asm/irq.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>

struct irq_desc;
typedef	void (*irq_flow_handler_t)(unsigned int irq,
					    struct irq_desc *desc);


/*
 * IRQ line status.
 *
 * Bits 0-7 are reserved for the IRQF_* bits in linux/interrupt.h
 *
 * IRQ types
 */
#define IRQ_TYPE_NONE		0x00000000	/* Default, unspecified type */
#define IRQ_TYPE_EDGE_RISING	0x00000001	/* Edge rising type */
#define IRQ_TYPE_EDGE_FALLING	0x00000002	/* Edge falling type */
#define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
#define IRQ_TYPE_LEVEL_HIGH	0x00000004	/* Level high type */
#define IRQ_TYPE_LEVEL_LOW	0x00000008	/* Level low type */
#define IRQ_TYPE_SENSE_MASK	0x0000000f	/* Mask of the above */
#define IRQ_TYPE_PROBE		0x00000010	/* Probing in progress */
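
/*
 * Example (illustrative sketch, not part of this header): a driver that
 * needs a falling-edge line would typically program the trigger through
 * set_irq_type(), declared later in this file:
 *
 *	set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
 */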

/* Internal flags */
#define IRQ_INPROGRESS		0x00000100	/* IRQ handler active - do not enter! */
#define IRQ_DISABLED		0x00000200	/* IRQ disabled - do not enter! */
#define IRQ_PENDING		0x00000400	/* IRQ pending - replay on enable */
#define IRQ_REPLAY		0x00000800	/* IRQ has been replayed but not acked yet */
#define IRQ_AUTODETECT		0x00001000	/* IRQ is being autodetected */
#define IRQ_WAITING		0x00002000	/* IRQ not yet seen - for autodetection */
#define IRQ_LEVEL		0x00004000	/* IRQ level triggered */
#define IRQ_MASKED		0x00008000	/* IRQ masked - shouldn't be seen again */
#define IRQ_PER_CPU		0x00010000	/* IRQ is per CPU */
#define IRQ_NOPROBE		0x00020000	/* IRQ is not valid for probing */
#define IRQ_NOREQUEST		0x00040000	/* IRQ cannot be requested */
#define IRQ_NOAUTOEN		0x00080000	/* IRQ will not be enabled on request irq */
#define IRQ_WAKEUP		0x00100000	/* IRQ triggers system wakeup */
#define IRQ_MOVE_PENDING	0x00200000	/* need to re-target IRQ destination */
#define IRQ_NO_BALANCING	0x00400000	/* IRQ is excluded from balancing */
#define IRQ_SPURIOUS_DISABLED	0x00800000	/* IRQ was disabled by the spurious trap */
#define IRQ_MOVE_PCNTXT		0x01000000	/* IRQ migration from process context */
#define IRQ_AFFINITY_SET	0x02000000	/* IRQ affinity was set from userspace */
#define IRQ_SUSPENDED		0x04000000	/* IRQ has gone through suspend sequence */
#define IRQ_ONESHOT		0x08000000	/* IRQ is not unmasked after hardirq */
#define IRQ_NESTED_THREAD	0x10000000	/* IRQ is nested into another, no own handler thread */

#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
# define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
#else
# define CHECK_IRQ_PER_CPU(var) 0
# define IRQ_NO_BALANCING_MASK	IRQ_NO_BALANCING
#endif

struct proc_dir_entry;
struct msi_desc;

/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @name:		name for /proc/interrupts
 * @startup:		start up the interrupt (defaults to ->enable if NULL)
 * @shutdown:		shut down the interrupt (defaults to ->disable if NULL)
 * @enable:		enable the interrupt (defaults to chip->unmask if NULL)
 * @disable:		disable the interrupt
 * @ack:		start of a new interrupt
 * @mask:		mask an interrupt source
 * @mask_ack:		ack and mask an interrupt source
 * @unmask:		unmask an interrupt source
 * @eoi:		end of interrupt - chip level
 * @end:		end of interrupt - flow level
 * @set_affinity:	set the CPU affinity on SMP machines
 * @retrigger:		resend an IRQ to the CPU
 * @set_type:		set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @set_wake:		enable/disable power-management wake-on of an IRQ
 *
 * @bus_lock:		function to lock access to slow bus (i2c) chips
 * @bus_sync_unlock:	function to sync and unlock slow bus (i2c) chips
 *
 * @release:		release function solely used by UML
 * @typename:		obsoleted by name, kept as migration helper
 */
struct irq_chip {
	const char	*name;
	unsigned int	(*startup)(unsigned int irq);
	void		(*shutdown)(unsigned int irq);
	void		(*enable)(unsigned int irq);
	void		(*disable)(unsigned int irq);

	void		(*ack)(unsigned int irq);
	void		(*mask)(unsigned int irq);
	void		(*mask_ack)(unsigned int irq);
	void		(*unmask)(unsigned int irq);
	void		(*eoi)(unsigned int irq);

	void		(*end)(unsigned int irq);
	int		(*set_affinity)(unsigned int irq,
					const struct cpumask *dest);
	int		(*retrigger)(unsigned int irq);
	int		(*set_type)(unsigned int irq, unsigned int flow_type);
	int		(*set_wake)(unsigned int irq, unsigned int on);

	void		(*bus_lock)(unsigned int irq);
	void		(*bus_sync_unlock)(unsigned int irq);

	/* Currently used only by UML, might disappear one day. */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	void		(*release)(unsigned int irq, void *dev_id);
#endif
	/*
	 * For compatibility, ->typename is copied into ->name.
	 * Will disappear.
	 */
	const char	*typename;
};
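
/*
 * Example (illustrative sketch only, not part of this header): a minimal
 * irq_chip for a hypothetical memory-mapped controller.  All "foo_*"
 * names and registers are made up for the example.
 *
 *	static void foo_mask(unsigned int irq)
 *	{
 *		writel(1 << (irq - FOO_IRQ_BASE), foo_base + FOO_MASK_SET);
 *	}
 *
 *	static void foo_unmask(unsigned int irq)
 *	{
 *		writel(1 << (irq - FOO_IRQ_BASE), foo_base + FOO_MASK_CLR);
 *	}
 *
 *	static struct irq_chip foo_chip = {
 *		.name	= "FOO",
 *		.ack	= foo_mask,
 *		.mask	= foo_mask,
 *		.unmask	= foo_unmask,
 *	};
 */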

struct timer_rand_state;
struct irq_2_iommu;
/**
 * struct irq_desc - interrupt descriptor
 * @irq:		interrupt number for this descriptor
 * @timer_rand_state:	pointer to timer rand state struct
 * @kstat_irqs:		irq stats per cpu
 * @irq_2_iommu:	iommu with this irq
 * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
 * @chip:		low level interrupt hardware access
 * @msi_desc:		MSI descriptor
 * @handler_data:	per-IRQ data for the irq_chip methods
 * @chip_data:		platform-specific per-chip private data for the chip
 *			methods, to allow shared chip implementations
 * @action:		the irq action chain
 * @status:		status information
 * @depth:		disable-depth, for nested irq_disable() calls
 * @wake_depth:		enable depth, for multiple set_irq_wake() callers
 * @irq_count:		stats field to detect stalled irqs
 * @last_unhandled:	aging timer for unhandled count
 * @irqs_unhandled:	stats field for spurious unhandled interrupts
 * @lock:		locking for SMP
 * @affinity:		IRQ affinity on SMP
 * @node:		node index useful for balancing
 * @pending_mask:	pending rebalanced interrupts
 * @threads_active:	number of irqaction threads currently running
 * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
 * @dir:		/proc/irq/ procfs entry
 * @name:		flow handler name for /proc/interrupts output
 */
struct irq_desc {
	unsigned int		irq;
	struct timer_rand_state *timer_rand_state;
	unsigned int            *kstat_irqs;
#ifdef CONFIG_INTR_REMAP
	struct irq_2_iommu      *irq_2_iommu;
#endif
	irq_flow_handler_t	handle_irq;
	struct irq_chip		*chip;
	struct msi_desc		*msi_desc;
	void			*handler_data;
	void			*chip_data;
	struct irqaction	*action;	/* IRQ action list */
	unsigned int		status;		/* IRQ status */

	unsigned int		depth;		/* nested irq disables */
	unsigned int		wake_depth;	/* nested wake enables */
	unsigned int		irq_count;	/* For detecting broken IRQs */
	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
	unsigned int		irqs_unhandled;
	raw_spinlock_t		lock;
#ifdef CONFIG_SMP
	cpumask_var_t		affinity;
	unsigned int		node;
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_var_t		pending_mask;
#endif
#endif
	atomic_t		threads_active;
	wait_queue_head_t       wait_for_threads;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*dir;
#endif
	const char		*name;
} ____cacheline_internodealigned_in_smp;
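
/*
 * Example (sketch): descriptor state is normally inspected and modified
 * under desc->lock, e.g.:
 *
 *	struct irq_desc *desc = irq_to_desc(irq);
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&desc->lock, flags);
 *	if (desc->status & IRQ_DISABLED)
 *		...
 *	raw_spin_unlock_irqrestore(&desc->lock, flags);
 */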

extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
					struct irq_desc *desc, int node);
extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);

#ifndef CONFIG_SPARSE_IRQ
extern struct irq_desc irq_desc[NR_IRQS];
#endif

#ifdef CONFIG_NUMA_IRQ_DESC
extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
#else
static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
{
	return desc;
}
#endif

extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);

/*
 * Pick up the arch-dependent methods:
 */
#include <asm/hw_irq.h>

extern int setup_irq(unsigned int irq, struct irqaction *new);
extern void remove_irq(unsigned int irq, struct irqaction *act);

#ifdef CONFIG_GENERIC_HARDIRQS

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_PENDING_IRQ

void move_native_irq(int irq);
void move_masked_irq(int irq);

#else /* CONFIG_GENERIC_PENDING_IRQ */

static inline void move_irq(int irq)
{
}

static inline void move_native_irq(int irq)
{
}

static inline void move_masked_irq(int irq)
{
}

#endif /* CONFIG_GENERIC_PENDING_IRQ */

#else /* CONFIG_SMP */

#define move_native_irq(x)
#define move_masked_irq(x)

#endif /* CONFIG_SMP */

extern int no_irq_affinity;

static inline int irq_balancing_disabled(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	return desc->status & IRQ_NO_BALANCING_MASK;
}

/* Handle irq action chains: */
extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);

/*
 * Built-in IRQ handlers for various IRQ types,
 * callable via desc->handle_irq()
 */
extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
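
/*
 * Example (sketch): platform code typically pairs one of the flow
 * handlers above with an irq_chip via set_irq_chip_and_handler(),
 * declared below (foo_chip as in the earlier example):
 *
 *	set_irq_chip_and_handler(irq, &foo_chip, handle_level_irq);
 */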

/*
 * Monolithic do_IRQ implementation.
 */
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
extern unsigned int __do_IRQ(unsigned int irq);
#endif

/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt. If the descriptor is attached to an
 * irqchip-style controller then we call the ->handle_irq() handler,
 * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
 */
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
	desc->handle_irq(irq, desc);
#else
	if (likely(desc->handle_irq))
		desc->handle_irq(irq, desc);
	else
		__do_IRQ(irq);
#endif
}

static inline void generic_handle_irq(unsigned int irq)
{
	generic_handle_irq_desc(irq, irq_to_desc(irq));
}
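
/*
 * Example (sketch): an architecture's low-level entry path reads the
 * pending interrupt number from its controller and hands it to the
 * generic layer (FOO_PENDING is a hypothetical register):
 *
 *	unsigned int irq = readl(foo_base + FOO_PENDING);
 *
 *	generic_handle_irq(irq);
 */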

/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
			   irqreturn_t action_ret);

/* Resending of interrupts: */
void check_irq_resend(struct irq_desc *desc, unsigned int irq);

/* Enable/disable irq debugging output: */
extern int noirqdebug_setup(char *str);

/* Checks whether the interrupt can be requested by request_irq(): */
extern int can_request_irq(unsigned int irq, unsigned long irqflags);

/* Dummy irq-chip implementations: */
extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;

extern void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
			 irq_flow_handler_t handle);
extern void
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name);

extern void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name);

/* caller has locked the irq_desc and both params are valid */
static inline void __set_irq_handler_unlocked(int irq,
					      irq_flow_handler_t handler)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	desc->handle_irq = handler;
}

/*
 * Set a highlevel flow handler for a given IRQ:
 */
static inline void
set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__set_irq_handler(irq, handle, 0, NULL);
}

/*
 * Set a highlevel chained flow handler for a given IRQ.
 * (a chained handler is automatically enabled and set to
 *  IRQ_NOREQUEST and IRQ_NOPROBE)
 */
static inline void
set_irq_chained_handler(unsigned int irq,
			irq_flow_handler_t handle)
{
	__set_irq_handler(irq, handle, 1, NULL);
}
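
/*
 * Example (sketch): cascading a secondary controller off one line of its
 * parent.  The demux handler decodes which child fired and re-enters the
 * generic layer; all "foo_*" names are hypothetical.
 *
 *	static void foo_demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		unsigned long stat = readl(foo_base + FOO_STAT);
 *
 *		generic_handle_irq(FOO_CHILD_BASE + __ffs(stat));
 *	}
 *
 *	set_irq_chained_handler(parent_irq, foo_demux_handler);
 */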

extern void set_irq_nested_thread(unsigned int irq, int nest);

extern void set_irq_noprobe(unsigned int irq);
extern void set_irq_probe(unsigned int irq);

/* Handle dynamic irq creation and destruction */
extern unsigned int create_irq_nr(unsigned int irq_want, int node);
extern int create_irq(void);
extern void destroy_irq(unsigned int irq);

/* Test to see if a driver has successfully requested an irq */
static inline int irq_has_action(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc->action != NULL;
}

/* Dynamic irq helper functions */
extern void dynamic_irq_init(unsigned int irq);
void dynamic_irq_init_keep_chip_data(unsigned int irq);
extern void dynamic_irq_cleanup(unsigned int irq);
void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);

/* Set/get chip/data for an IRQ: */
extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
extern int set_irq_data(unsigned int irq, void *data);
extern int set_irq_chip_data(unsigned int irq, void *data);
extern int set_irq_type(unsigned int irq, unsigned int type);
extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);

#define get_irq_chip(irq)	(irq_to_desc(irq)->chip)
#define get_irq_chip_data(irq)	(irq_to_desc(irq)->chip_data)
#define get_irq_data(irq)	(irq_to_desc(irq)->handler_data)
#define get_irq_msi(irq)	(irq_to_desc(irq)->msi_desc)

#define get_irq_desc_chip(desc)		((desc)->chip)
#define get_irq_desc_chip_data(desc)	((desc)->chip_data)
#define get_irq_desc_data(desc)		((desc)->handler_data)
#define get_irq_desc_msi(desc)		((desc)->msi_desc)
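
/*
 * Example (sketch): a chip implementation can stash a pointer to its
 * private state in chip_data at setup time and fetch it back inside its
 * methods ("struct foo_priv" is hypothetical):
 *
 *	set_irq_chip_data(irq, priv);			- at setup
 *	struct foo_priv *priv = get_irq_chip_data(irq);	- in a chip method
 */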

#endif /* CONFIG_GENERIC_HARDIRQS */

#endif /* !CONFIG_S390 */

#ifdef CONFIG_SMP
/**
 * alloc_desc_masks - allocate cpumasks for irq_desc
 * @desc:	pointer to irq_desc struct
 * @node:	node which will be handling the cpumasks
 * @boot:	true if need bootmem
 *
 * Allocates affinity and pending_mask cpumask if required.
 * Returns true if successful (or not required).
 */
static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
							bool boot)
{
	gfp_t gfp = GFP_ATOMIC;

	if (boot)
		gfp = GFP_NOWAIT;

#ifdef CONFIG_CPUMASK_OFFSTACK
	if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
		return false;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->affinity);
		return false;
	}
#endif
#endif
	return true;
}
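
/*
 * Example (sketch): a typical caller allocates the masks and then
 * initializes them:
 *
 *	if (!alloc_desc_masks(desc, node, false))
 *		return NULL;
 *	init_desc_masks(desc);
 */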

static inline void init_desc_masks(struct irq_desc *desc)
{
	cpumask_setall(desc->affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

/**
 * init_copy_desc_masks - copy cpumasks for irq_desc
 * @old_desc:	pointer to old irq_desc struct
 * @new_desc:	pointer to new irq_desc struct
 *
 * Ensures affinity and pending_mask are copied to the new irq_desc.
 * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
 * irq_desc struct so the copy is redundant.
 */

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	cpumask_copy(new_desc->affinity, old_desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
#endif
#endif
}

static inline void free_desc_masks(struct irq_desc *old_desc,
				   struct irq_desc *new_desc)
{
	free_cpumask_var(old_desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(old_desc->pending_mask);
#endif
}

#else /* !CONFIG_SMP */

static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
								bool boot)
{
	return true;
}

static inline void init_desc_masks(struct irq_desc *desc)
{
}

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
}

static inline void free_desc_masks(struct irq_desc *old_desc,
				   struct irq_desc *new_desc)
{
}
#endif	/* CONFIG_SMP */

#endif /* _LINUX_IRQ_H */