/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
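/*
 * Allocate the affinity mask for a descriptor on the given node and,
 * with CONFIG_GENERIC_PENDING_IRQ, the pending mask as well; on failure
 * everything allocated so far is freed again.
 */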
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

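/*
 * Reset a descriptor to its default state: no chip or handler data,
 * handle_bad_irq as flow handler, marked disabled with a disable depth
 * of one, and all per-cpu interrupt counts cleared.
 */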
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

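/*
 * Allocate one descriptor on the given node, including the per-cpu
 * statistics storage and the cpumasks, and initialize it to the
 * defaults. Returns NULL if any allocation fails.
 */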
static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

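/*
 * Grow nr_irqs on demand, bounded by the statically sized allocation
 * bitmap; requests beyond IRQ_BITMAP_BITS cannot be satisfied.
 */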
static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
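
/*
 * Usage sketch (hypothetical caller, not part of this file): a chained
 * flow handler that demultiplexes a muxed interrupt source would look
 * up the child's Linux irq number and then call:
 *
 *	if (generic_handle_irq(child_irq))
 *		... no descriptor is installed for child_irq ...
 */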

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(irq_alloc_descs);
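
/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * that needs three consecutive interrupt numbers anywhere at or above
 * 64 on its home node could do:
 *
 *	int base = irq_alloc_descs(-1, 64, 3, numa_node_id());
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	irq_free_descs(base, 3);
 */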

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns the next allocated irq number at or after @offset, or nr_irqs
 * if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

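/*
 * __irq_get_desc_lock() and __irq_put_desc_unlock() below must be used
 * as a pair: the flags value filled in by the lock side has to be
 * passed back to the unlock side, with the same @bus argument, so that
 * a taken chip bus lock is dropped again.
 */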
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

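/*
 * Interrupt statistics: kstat_irqs_cpu() reads the count for a single
 * cpu, kstat_irqs() sums the per-cpu counters over all possible cpus.
 */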
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}