/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/radix-tree.h>
#include <trace/events/irq.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:       the interrupt number
 * @desc:      description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(irq, desc);
	ack_bad_irq(irq);
}

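/*
 * With CONFIG_SMP and CONFIG_GENERIC_HARDIRQS enabled, allocate the
 * irq_default_affinity mask and allow all CPUs in it; otherwise this
 * is a no-op.
 */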
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
	.irq	    = -1,
	.status	    = IRQ_DISABLED,
	.chip	    = &no_irq_chip,
	.handle_irq = handle_bad_irq,
	.depth      = 1,
	.lock       = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};

void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
	void *ptr;

	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
			   GFP_ATOMIC, node);

	/*
	 * Don't overwrite if we could not get a new one;
	 * init_copy_kstat_irqs() could still use the old one.
	 */
	if (ptr) {
		printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
		desc->kstat_irqs = ptr;
	}
}

static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	raw_spin_lock_init(&desc->lock);
	desc->irq = irq;
#ifdef CONFIG_SMP
	desc->node = node;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, node, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!alloc_desc_masks(desc, node, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	init_desc_masks(desc);
	arch_init_chip_data(desc, node);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_RAW_SPINLOCK(sparse_irq_lock);

static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);

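/*
 * With CONFIG_SPARSE_IRQ the descriptors live in a radix tree keyed by
 * the interrupt number; the helpers below insert, look up and replace
 * entries in that tree.
 */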
static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	void **ptr;

	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
	if (ptr)
		radix_tree_replace_slot(ptr, desc);
}

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.irq	    = -1,
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
	}
};

static unsigned int *kstat_irqs_legacy;

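/*
 * Set up the NR_IRQS_LEGACY statically allocated descriptors at boot:
 * allocate their kstat_irqs storage, initialize the cpumasks and
 * register each descriptor in the radix tree.
 */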
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int node;
	int i;

	init_irq_default_affinity();

	/* initialize nr_irqs based on nr_cpu_ids */
	arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);
	node = first_online_node;

	/* allocate based on nr_cpu_ids */
	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
					  sizeof(int), GFP_NOWAIT, node);

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq = i;
#ifdef CONFIG_SMP
		desc[i].node = node;
#endif
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		alloc_desc_masks(&desc[i], node, true);
		init_desc_masks(&desc[i]);
		set_irq_desc(i, &desc[i]);
	}

	return arch_early_irq_init();
}

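/*
 * Return the descriptor for @irq, allocating and initializing one on
 * @node if it does not exist yet. Serialized against other CPUs by
 * sparse_irq_lock.
 */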
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_to_desc(irq);
	if (desc)
		return desc;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_to_desc(irq);
	if (desc)
		goto out_unlock;

	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);

	printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, node);

	set_irq_desc(irq, desc);

out_unlock:
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status = IRQ_DISABLED,
		.chip = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth = 1,
		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq = i;
		alloc_desc_masks(&desc[i], 0, true);
		init_desc_masks(&desc[i]);
		desc[i].kstat_irqs = kstat_irqs_all[i];
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */

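/* Reset the per-CPU interrupt counters of @desc for all possible CPUs. */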
void clear_kstat_irqs(struct irq_desc *desc)
{
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this themselves.
 */
static void ack_bad(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	print_irq_desc(irq, desc);
	ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}

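/*
 * Warn once (via the IRQTF_WARNED flag) when a primary handler returns
 * IRQ_WAKE_THREAD although no thread function was set up for the action.
 */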
static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
		return;

	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
	       "but no thread function available.", irq, action->name);
}

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		trace_irq_handler_entry(irq, action);
		ret = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, ret);

		switch (ret) {
		case IRQ_WAKE_THREAD:
			/*
			 * Set result to handled so the spurious check
			 * does not trigger.
			 */
			ret = IRQ_HANDLED;

			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			/*
			 * Wake up the handler thread for this
			 * action. In case the thread crashed and was
			 * killed we just pretend that we handled the
			 * interrupt. The hardirq handler above has
			 * disabled the device interrupt, so no irq
			 * storm is lurking.
			 */
			if (likely(!test_bit(IRQTF_DIED,
					     &action->thread_flags))) {
				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
				wake_up_process(action->thread);
			}

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			status |= action->flags;
			break;

		default:
			break;
		}

		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}

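/*
 * Illustrative sketch only: a driver whose primary handler returns
 * IRQ_WAKE_THREAD ends up in the action chain processed above. The
 * device, helper and function names below are hypothetical.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!my_dev_irq_pending(dev))
 *			return IRQ_NONE;
 *		my_dev_mask_irq(dev);
 *		return IRQ_WAKE_THREAD;	// handle_IRQ_event wakes the thread
 *	}
 *
 *	static irqreturn_t my_thread_fn(int irq, void *dev_id)
 *	{
 *		// heavy lifting in process context
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, my_quick_check, my_thread_fn,
 *				   IRQF_SHARED, "my_dev", dev);
 */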
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ

#ifdef CONFIG_ENABLE_WARN_DEPRECATED
# warning __do_IRQ is deprecated. Please convert to proper flow handlers
#endif

/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack)
			desc->chip->ack(irq);
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	raw_spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		raw_spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		raw_spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	raw_spin_unlock(&desc->lock);

	return 1;
}
#endif

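/*
 * Assign the common lockdep class to every descriptor lock so that
 * lockdep treats all irq_desc locks as a single lock class, as noted
 * at the top of this file.
 */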
void early_init_irq_lock_class(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	}
}

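/*
 * Return the number of interrupts handled for @irq on @cpu, or 0 when
 * no descriptor exists for @irq.
 */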
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
EXPORT_SYMBOL(kstat_irqs_cpu);