| Thomas Gleixner | e144710 | 2010-10-01 16:03:45 +0200 | [diff] [blame] | 1 | #ifndef _LINUX_IRQDESC_H | 
|  | 2 | #define _LINUX_IRQDESC_H | 
|  | 3 |  | 
|  | 4 | /* | 
|  | 5 | * Core internal functions to deal with irq descriptors | 
|  | 6 | * | 
|  | 7 | * This include will move to kernel/irq once we cleaned up the tree. | 
|  | 8 | * For now it's included from <linux/irq.h> | 
|  | 9 | */ | 
|  | 10 |  | 
| Ben Hutchings | cd7eab4 | 2011-01-19 21:01:44 +0000 | [diff] [blame] | 11 | struct irq_affinity_notify; | 
| Thomas Gleixner | e144710 | 2010-10-01 16:03:45 +0200 | [diff] [blame] | 12 | struct proc_dir_entry; | 
|  | 13 | struct timer_rand_state; | 
| Paul Gortmaker | ec53cf2 | 2011-09-19 20:33:19 -0400 | [diff] [blame] | 14 | struct module; | 
/**
 * struct irq_desc - interrupt descriptor
 * @irq_data:		per irq and chip data passed down to chip functions
 * @kstat_irqs:		irq stats per cpu
 * @handle_irq:		highlevel irq-events handler
 * @preflow_handler:	handler called before the flow handler (currently used by sparc)
 * @action:		the irq action chain
 * @status_use_accessors: status information, use the irqd_*()/irq_settings_*()
 *			accessors instead of poking at it directly
 * @core_internal_state__do_not_mess_with_it: core internal status information
 * @depth:		disable-depth, for nested irq_disable() calls
 * @wake_depth:		enable depth, for multiple irq_set_irq_wake() callers
 * @irq_count:		stats field to detect stalled irqs
 * @last_unhandled:	aging timer for unhandled count
 * @irqs_unhandled:	stats field for spurious unhandled interrupts
 * @lock:		locking for SMP
 * @percpu_enabled:	mask of cpus which have this per-cpu interrupt enabled
 * @affinity_hint:	hint to user space for preferred irq affinity
 * @affinity_notify:	context for notification of affinity changes
 * @pending_mask:	pending rebalanced interrupts
 * @threads_oneshot:	bitfield to handle shared oneshot threads
 * @threads_active:	number of irqaction threads currently running
 * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
 * @dir:		/proc/irq/ procfs entry
 * @owner:		module providing the underlying implementation, for
 *			module reference counting
 * @name:		flow handler name for /proc/interrupts output
 */
struct irq_desc {
	struct irq_data		irq_data;
	unsigned int __percpu	*kstat_irqs;
	irq_flow_handler_t	handle_irq;
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
	irq_preflow_handler_t	preflow_handler;
#endif
	struct irqaction	*action;	/* IRQ action list */
	unsigned int		status_use_accessors;
	unsigned int		core_internal_state__do_not_mess_with_it;
	unsigned int		depth;		/* nested irq disables */
	unsigned int		wake_depth;	/* nested wake enables */
	unsigned int		irq_count;	/* For detecting broken IRQs */
	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
	unsigned int		irqs_unhandled;
	raw_spinlock_t		lock;
	struct cpumask		*percpu_enabled;
#ifdef CONFIG_SMP
	const struct cpumask	*affinity_hint;
	struct irq_affinity_notify *affinity_notify;
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_var_t		pending_mask;
#endif
#endif
	unsigned long		threads_oneshot;
	atomic_t		threads_active;
	wait_queue_head_t       wait_for_threads;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*dir;
#endif
	struct module		*owner;
	const char		*name;
} ____cacheline_internodealigned_in_smp;
|  | 73 |  | 
#ifndef CONFIG_SPARSE_IRQ
/* Statically sized descriptor array, used when sparse irqs are disabled */
extern struct irq_desc irq_desc[NR_IRQS];
#endif
|  | 77 |  | 
| Thomas Gleixner | e144710 | 2010-10-01 16:03:45 +0200 | [diff] [blame] | 78 | #ifdef CONFIG_GENERIC_HARDIRQS | 
|  | 79 |  | 
/* Return the embedded irq_data of @desc (never NULL for a valid desc) */
static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
	return &desc->irq_data;
}
|  | 84 |  | 
/* Return the irq chip currently installed on @desc */
static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
{
	return desc->irq_data.chip;
}
|  | 89 |  | 
/* Return the chip-private data associated with @desc */
static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
{
	return desc->irq_data.chip_data;
}
|  | 94 |  | 
/* Return the handler-private data associated with @desc */
static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
{
	return desc->irq_data.handler_data;
}
|  | 99 |  | 
/* Return the MSI descriptor attached to @desc, or NULL if none */
static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
{
	return desc->irq_data.msi_desc;
}
|  | 104 |  | 
/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt. If the descriptor is attached to an
 * irqchip-style controller then we call the ->handle_irq() handler.
 * NOTE(review): the original comment claimed a __do_IRQ() fallback for
 * irqtype-style controllers; that legacy path is not visible here —
 * confirm it still exists in this tree before relying on it.
 */
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	desc->handle_irq(irq, desc);
}
|  | 115 |  | 
| Thomas Gleixner | fe12bc2 | 2011-05-18 12:48:00 +0200 | [diff] [blame] | 116 | int generic_handle_irq(unsigned int irq); | 
| Thomas Gleixner | e144710 | 2010-10-01 16:03:45 +0200 | [diff] [blame] | 117 |  | 
|  | 118 | /* Test to see if a driver has successfully requested an irq */ | 
|  | 119 | static inline int irq_has_action(unsigned int irq) | 
|  | 120 | { | 
|  | 121 | struct irq_desc *desc = irq_to_desc(irq); | 
|  | 122 | return desc->action != NULL; | 
|  | 123 | } | 
|  | 124 |  | 
| Thomas Gleixner | a2e8461 | 2011-03-23 13:10:31 +0100 | [diff] [blame] | 125 | /* caller has locked the irq_desc and both params are valid */ | 
|  | 126 | static inline void __irq_set_handler_locked(unsigned int irq, | 
|  | 127 | irq_flow_handler_t handler) | 
|  | 128 | { | 
|  | 129 | struct irq_desc *desc; | 
|  | 130 |  | 
|  | 131 | desc = irq_to_desc(irq); | 
|  | 132 | desc->handle_irq = handler; | 
|  | 133 | } | 
|  | 134 |  | 
|  | 135 | /* caller has locked the irq_desc and both params are valid */ | 
|  | 136 | static inline void | 
|  | 137 | __irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip, | 
|  | 138 | irq_flow_handler_t handler, const char *name) | 
|  | 139 | { | 
|  | 140 | struct irq_desc *desc; | 
|  | 141 |  | 
|  | 142 | desc = irq_to_desc(irq); | 
|  | 143 | irq_desc_get_irq_data(desc)->chip = chip; | 
|  | 144 | desc->handle_irq = handler; | 
|  | 145 | desc->name = name; | 
|  | 146 | } | 
|  | 147 |  | 
| Thomas Gleixner | e144710 | 2010-10-01 16:03:45 +0200 | [diff] [blame] | 148 | static inline int irq_balancing_disabled(unsigned int irq) | 
|  | 149 | { | 
|  | 150 | struct irq_desc *desc; | 
|  | 151 |  | 
|  | 152 | desc = irq_to_desc(irq); | 
| Thomas Gleixner | 0c6f8a8 | 2011-03-28 13:32:20 +0200 | [diff] [blame] | 153 | return desc->status_use_accessors & IRQ_NO_BALANCING_MASK; | 
| Thomas Gleixner | e144710 | 2010-10-01 16:03:45 +0200 | [diff] [blame] | 154 | } | 
| Thomas Gleixner | e144710 | 2010-10-01 16:03:45 +0200 | [diff] [blame] | 155 |  | 
| Thomas Gleixner | d3e17de | 2011-03-22 17:08:15 +0100 | [diff] [blame] | 156 | static inline void | 
|  | 157 | irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class) | 
|  | 158 | { | 
|  | 159 | struct irq_desc *desc = irq_to_desc(irq); | 
|  | 160 |  | 
|  | 161 | if (desc) | 
|  | 162 | lockdep_set_class(&desc->lock, class); | 
|  | 163 | } | 
|  | 164 |  | 
| Thomas Gleixner | 7812957 | 2011-02-10 15:14:20 +0100 | [diff] [blame] | 165 | #ifdef CONFIG_IRQ_PREFLOW_FASTEOI | 
|  | 166 | static inline void | 
|  | 167 | __irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler) | 
|  | 168 | { | 
|  | 169 | struct irq_desc *desc; | 
|  | 170 |  | 
|  | 171 | desc = irq_to_desc(irq); | 
|  | 172 | desc->preflow_handler = handler; | 
|  | 173 | } | 
|  | 174 | #endif | 
| Thomas Gleixner | e144710 | 2010-10-01 16:03:45 +0200 | [diff] [blame] | 175 | #endif | 
|  | 176 |  | 
|  | 177 | #endif |