/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

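/*
 * desc_set_defaults() below resets a descriptor to its pristine state:
 * dummy chip (no_irq_chip), no chip/handler/MSI data, flow handler set to
 * handle_bad_irq, logically disabled (depth 1), statistics cleared and the
 * SMP related fields reinitialized for @node.
 */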
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
		struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

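/*
 * With CONFIG_SPARSE_IRQ the descriptors are allocated on demand and kept
 * in a radix tree indexed by the interrupt number instead of a static
 * NR_IRQS sized array.
 */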
static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

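/*
 * free_desc() removes the /proc entry first, then unhashes the descriptor
 * from the radix tree under sparse_irq_lock so irq_to_desc() can no longer
 * find it, and finally releases the cpumasks, the per-CPU statistics and
 * the descriptor itself.
 */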
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}
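/*
 * Note: with CONFIG_SPARSE_IRQ nr_irqs is only a soft limit; the expansion
 * above lets __irq_alloc_descs() grow it on demand up to IRQ_BITMAP_BITS.
 * The !SPARSE_IRQ variant further down always fails, as the irq_desc array
 * is statically sized to NR_IRQS.
 */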

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
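/*
 * Typical caller (illustrative sketch, not taken from this file): a chained
 * demultiplexing handler that has already translated a child hardware irq
 * into its Linux irq number, for example via an irq domain lookup:
 *
 *	unsigned int child = irq_find_mapping(domain, hwirq);
 *
 *	generic_handle_irq(child);
 *
 * irq_find_mapping() is only one way to obtain the number; any valid Linux
 * irq number may be passed in.
 */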

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
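/*
 * Usage sketch (illustrative, not taken from this file): a driver that
 * needs four consecutive dynamically numbered interrupts could do
 *
 *	int base = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	irq_free_descs(base, 4);
 *
 * irq_alloc_descs() is the THIS_MODULE convenience wrapper around
 * __irq_alloc_descs(); passing irq < 0 requests any free range.
 */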
| Thomas Gleixner | 1f5a5b8 | 2010-09-27 17:48:26 +0200 | [diff] [blame] | 390 |  | 
| Thomas Gleixner | a98d24b | 2010-09-30 10:45:07 +0200 | [diff] [blame] | 391 | /** | 
| Thomas Gleixner | 06f6c33 | 2010-10-12 12:31:46 +0200 | [diff] [blame] | 392 |  * irq_reserve_irqs - mark irqs allocated | 
 | 393 |  * @from:	mark from irq number | 
 | 394 |  * @cnt:	number of irqs to mark | 
 | 395 |  * | 
 | 396 |  * Returns 0 on success or an appropriate error code | 
 | 397 |  */ | 
 | 398 | int irq_reserve_irqs(unsigned int from, unsigned int cnt) | 
 | 399 | { | 
| Thomas Gleixner | 06f6c33 | 2010-10-12 12:31:46 +0200 | [diff] [blame] | 400 | 	unsigned int start; | 
 | 401 | 	int ret = 0; | 
 | 402 |  | 
 | 403 | 	if (!cnt || (from + cnt) > nr_irqs) | 
 | 404 | 		return -EINVAL; | 
 | 405 |  | 
| Thomas Gleixner | a05a900 | 2010-10-08 12:47:53 +0200 | [diff] [blame] | 406 | 	mutex_lock(&sparse_irq_lock); | 
| Thomas Gleixner | 06f6c33 | 2010-10-12 12:31:46 +0200 | [diff] [blame] | 407 | 	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); | 
 | 408 | 	if (start == from) | 
 | 409 | 		bitmap_set(allocated_irqs, start, cnt); | 
 | 410 | 	else | 
 | 411 | 		ret = -EEXIST; | 
| Thomas Gleixner | a05a900 | 2010-10-08 12:47:53 +0200 | [diff] [blame] | 412 | 	mutex_unlock(&sparse_irq_lock); | 
| Thomas Gleixner | 06f6c33 | 2010-10-12 12:31:46 +0200 | [diff] [blame] | 413 | 	return ret; | 
 | 414 | } | 
 | 415 |  | 
 | 416 | /** | 
| Thomas Gleixner | a98d24b | 2010-09-30 10:45:07 +0200 | [diff] [blame] | 417 |  * irq_get_next_irq - get next allocated irq number | 
 | 418 |  * @offset:	where to start the search | 
 | 419 |  * | 
 | 420 |  * Returns next irq number after offset or nr_irqs if none is found. | 
 | 421 |  */ | 
 | 422 | unsigned int irq_get_next_irq(unsigned int offset) | 
 | 423 | { | 
 | 424 | 	return find_next_bit(allocated_irqs, nr_irqs, offset); | 
 | 425 | } | 
 | 426 |  | 
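/*
 * __irq_get_desc_lock() looks up a descriptor, optionally verifies whether
 * it is (or is not) a per-CPU devid interrupt, takes the chip bus lock if
 * requested and then acquires desc->lock. Callers must pair it with
 * __irq_put_desc_unlock() using the same 'bus' argument.
 */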
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

int irq_set_percpu_devid(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	irq_set_percpu_devid_flags(irq);
	return 0;
}
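/*
 * Note (sketch of the expected call sequence, not enforced here): after
 * irq_set_percpu_devid() the interrupt is typically requested once with
 * request_percpu_irq() and then enabled on each CPU that should receive
 * it with enable_percpu_irq().
 */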

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to initialize
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}