/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	desc->status = IRQ_DEFAULT_INIT_FLAGS;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
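
/*
 * Hedged usage sketch (not part of this file; the helper name is
 * hypothetical): callers look a descriptor up and take its lock before
 * touching its state.
 *
 *	static void example_poke_desc(unsigned int irq)
 *	{
 *		struct irq_desc *desc = irq_to_desc(irq);
 *		unsigned long flags;
 *
 *		if (!desc)
 *			return;	// no descriptor in the radix tree yet
 *		raw_spin_lock_irqsave(&desc->lock, flags);
 *		// inspect or modify descriptor state here
 *		raw_spin_unlock_irqrestore(&desc->lock, flags);
 *	}
 */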

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	int res = irq_alloc_descs(irq, irq, 1, node);

	if (res == -EEXIST || res == irq)
		return irq_to_desc(irq);
	return NULL;
}
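
/*
 * Usage sketch (assumption, not taken from this file): a caller that
 * needs a descriptor for a fixed irq number on a given node might write:
 *
 *	struct irq_desc *desc = irq_to_desc_alloc_node(irq, numa_node_id());
 *
 *	if (!desc)
 *		return -ENOMEM;
 *
 * The -EEXIST check above makes this safe for irqs whose descriptor was
 * already allocated earlier.
 */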

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status		= IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
		/* TODO : do this allocation on-demand ... */
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(desc + i, GFP_KERNEL, node);
		desc_smp_init(desc + i, node);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
#if defined(CONFIG_KSTAT_IRQS_ONDEMAND)
	struct irq_desc *desc;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		desc = irq_to_desc(start + i);
		if (desc && !desc->kstat_irqs) {
			unsigned int __percpu *stats = alloc_percpu(unsigned int);

			if (!stats)
				return -1;
			if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL)
				free_percpu(stats);
		}
	}
#endif
	return start;
}
#endif /* !CONFIG_SPARSE_IRQ */

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
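
/*
 * Pairing sketch (hypothetical driver code): a descriptor range obtained
 * from irq_alloc_descs() below is released with the same base and count.
 *
 *	int base = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (base < 0)
 *		return base;
 *	// ... use irqs base .. base + 3 ...
 *	irq_free_descs(base, 4);
 */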

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	ret = -ENOMEM;
	if (start >= nr_irqs)
		goto err;

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
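
/*
 * Example (hedged sketch): irq < 0 asks for any free range searched from
 * @from upwards; irq >= 0 demands that exact number and yields -EEXIST
 * when the bitmap search does not land on it.
 *
 *	// any 8 consecutive descriptors, searched from 64 upwards
 *	int base = irq_alloc_descs(-1, 64, 8, numa_node_id());
 *
 *	// exactly irq 16, or -EEXIST if that slot is taken
 *	int ret = irq_alloc_descs(16, 16, 1, numa_node_id());
 */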

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
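
/*
 * Sketch (assumption): an architecture with fixed legacy vectors could
 * mark them used without allocating descriptors through alloc_descs():
 *
 *	if (irq_reserve_irqs(0, 16))
 *		printk(KERN_WARNING "legacy irqs already in use\n");
 */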

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number at or after @offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
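
/*
 * Iteration sketch (hypothetical): find_next_bit() is inclusive of the
 * offset, so stepping with offset + 1 visits every allocated irq number
 * exactly once.
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1)) {
 *		// irq is an allocated interrupt number here
 *	}
 */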

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
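
/*
 * Usage sketch (assumption): callers recycling an irq number reset the
 * descriptor before handing the number out again, e.g.
 *
 *	dynamic_irq_cleanup(irq);	// back to handle_bad_irq, depth 1
 */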

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}
#endif /* CONFIG_GENERIC_HARDIRQS */
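
/*
 * Accounting sketch (assumption, mirrors what a /proc/interrupts style
 * consumer would do): per-cpu counts come from kstat_irqs_cpu(), the
 * total from kstat_irqs().
 *
 *	unsigned int cpu, total = kstat_irqs(irq);
 *
 *	for_each_online_cpu(cpu)
 *		seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
 */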