/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them.  Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

#ifndef CONFIG_SPARSE_IRQ
EXPORT_SYMBOL(irq_desc);
#endif

int distribute_irqs = 1;

static inline notrace unsigned long get_hard_enabled(void)
{
	unsigned long enabled;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

	return enabled;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

notrace void raw_local_irq_restore(unsigned long en)
{
	/*
	 * get_paca()->soft_enabled = en;
	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
	 * That was allowed before, and in such a case we do need to take care
	 * that gcc will set soft_enabled directly via r13, not choose to use
	 * an intermediate register, lest we're preempted to a different cpu.
	 */
	set_soft_enabled(en);
	if (!en)
		return;

#ifdef CONFIG_PPC_STD_MMU_64
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		/*
		 * Do we need to disable preemption here?  Not really: in the
		 * unlikely event that we're preempted to a different cpu in
		 * between getting r13, loading its lppaca_ptr, and loading
		 * its any_int, we might call iseries_handle_interrupts without
		 * an interrupt pending on the new cpu, but that's no disaster,
		 * is it?  And the business of preempting us off the old cpu
		 * would itself involve a local_irq_restore which handles the
		 * interrupt to that cpu.
		 *
		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
		 * to avoid any preemption checking added into get_paca().
		 */
		if (local_paca->lppaca_ptr->int_dword.any_int)
			iseries_handle_interrupts();
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * if (get_paca()->hard_enabled) return;
	 * But again we need to take care that gcc gets hard_enabled directly
	 * via r13, not choose to use an intermediate register, lest we're
	 * preempted to a different cpu in between the two instructions.
	 */
	if (get_hard_enabled())
		return;

	/*
	 * Need to hard-enable interrupts here.  Since currently disabled,
	 * no need to take further asm precautions against preemption; but
	 * use local_paca instead of get_paca() to avoid preemption checking.
	 */
	local_paca->hard_enabled = en;
	if ((int)mfspr(SPRN_DEC) < 0)
		mtspr(SPRN_DEC, 1);

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp;
		lv1_get_version_info(&tmp);
	}

	__hard_irq_enable();
}
EXPORT_SYMBOL(raw_local_irq_restore);
#endif /* CONFIG_PPC64 */
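
/*
 * Note on the lazy-disable scheme above (informative summary, not new
 * behaviour): on 64-bit, local_irq_disable() only clears
 * paca->soft_enabled and leaves MSR:EE on.  If an interrupt arrives while
 * soft-disabled, the exception entry code hard-disables, records that an
 * interrupt is pending, and returns; raw_local_irq_restore() above then
 * replays it.  Callers just use the usual portable pattern:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		(clears soft_enabled only)
 *	...critical section...
 *	local_irq_restore(flags);	(may replay a pending interrupt)
 */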

static int show_other_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
	seq_printf(p, " Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, " Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "CNT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, " Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, " Machine check exceptions\n");

	return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j, prec;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > nr_irqs)
		return 0;

	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
		j *= 10;

	if (i == nr_irqs)
		return show_other_interrupts(p, prec);

	/* print header */
	if (i == 0) {
		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	desc = irq_to_desc(i);
	if (!desc)
		return 0;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

	if (desc->chip)
		seq_printf(p, " %-16s", desc->chip->name);
	else
		seq_printf(p, " %-16s", "None");
	seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");

	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;

	return sum;
}

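/*
 * Informative note: arch_irq_stat_cpu() is the arch hook called by the
 * generic /proc/stat code (fs/proc/stat.c) when summing per-cpu interrupt
 * counts, so the counters above feed the "intr" line of /proc/stat as
 * well as /proc/interrupts.
 */
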
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(const struct cpumask *map)
{
	struct irq_desc *desc;
	unsigned int irq;
	static int warned;
	cpumask_var_t mask;

	alloc_cpumask_var(&mask, GFP_KERNEL);

	for_each_irq(irq) {
		desc = irq_to_desc(irq);
		if (desc && desc->status & IRQ_PER_CPU)
			continue;

		cpumask_and(mask, desc->affinity, map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (desc->chip->set_affinity)
			desc->chip->set_affinity(irq, mask);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif
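
/*
 * Sketch of the intended caller (hypothetical; real platform code
 * differs): a platform's cpu_disable path clears the dying CPU from the
 * online mask and then migrates interrupts away from it, roughly:
 *
 *	set_cpu_online(cpu, false);
 *	fixup_irqs(cpu_online_mask);
 *
 * so any IRQ whose affinity pointed only at the dying CPU is retargeted.
 */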

#ifdef CONFIG_IRQSTACKS
static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		generic_handle_irq(irq);
		return;
	}

	desc = irq_to_desc(irq);
	saved_sp_limit = current->thread.ksp_limit;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
			       (curtp->preempt_count & SOFTIRQ_MASK);

	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
	generic_handle_irq(irq);
}
#endif

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	trace_irq_entry(regs);

	irq_enter();

	check_stack_overflow();

	irq = ppc_md.get_irq();

	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
		handle_one_irq(irq);
	else if (irq != NO_IRQ_IGNORE)
		__get_cpu_var(irq_stat).spurious_irqs++;

	irq_exit();
	set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
			get_lppaca()->int_dword.fields.decr_int) {
		get_lppaca()->int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif

	trace_irq_exit(regs);
}

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
		tp = critirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
		tp = dbgirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;
}

#else
#define do_softirq_onstack()	__do_softirq()
#endif /* CONFIG_IRQSTACKS */

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}


/*
 * IRQ controller and virtual interrupts
 */

static LIST_HEAD(irq_hosts);
static DEFINE_RAW_SPINLOCK(irq_big_lock);
static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
	return h->of_node != NULL && h->of_node == np;
}

struct irq_host *irq_alloc_host(struct device_node *of_node,
				unsigned int revmap_type,
				unsigned int revmap_arg,
				struct irq_host_ops *ops,
				irq_hw_number_t inval_irq)
{
	struct irq_host *host;
	unsigned int size = sizeof(struct irq_host);
	unsigned int i;
	unsigned int *rmap;
	unsigned long flags;

	/* Allocate structure and revmap table if using linear mapping */
	if (revmap_type == IRQ_HOST_MAP_LINEAR)
		size += revmap_arg * sizeof(unsigned int);
	host = zalloc_maybe_bootmem(size, GFP_KERNEL);
	if (host == NULL)
		return NULL;

	/* Fill structure */
	host->revmap_type = revmap_type;
	host->inval_irq = inval_irq;
	host->ops = ops;
	host->of_node = of_node_get(of_node);

	if (host->ops->match == NULL)
		host->ops->match = default_irq_host_match;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* If it's a legacy controller, check for duplicates and
	 * mark it as allocated (we use the irq 0 host pointer for that)
	 */
	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
		if (irq_map[0].host != NULL) {
			raw_spin_unlock_irqrestore(&irq_big_lock, flags);
			/* If we are early boot, we can't free the structure,
			 * too bad...
			 * this will be fixed once slab is made available early
			 * instead of the current cruft
			 */
			if (mem_init_done)
				kfree(host);
			return NULL;
		}
		irq_map[0].host = host;
	}

	list_add(&host->link, &irq_hosts);
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);

	/* Additional setups per revmap type */
	switch(revmap_type) {
	case IRQ_HOST_MAP_LEGACY:
		/* 0 is always the invalid number for legacy */
		host->inval_irq = 0;
		/* set us up as the host for all legacy interrupts */
		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
			irq_map[i].hwirq = i;
			smp_wmb();
			irq_map[i].host = host;
			smp_wmb();

			/* Clear norequest flags */
			irq_to_desc(i)->status &= ~IRQ_NOREQUEST;

			/* Legacy flags are left to default at this point,
			 * one can then use irq_create_mapping() to
			 * explicitly change them
			 */
			ops->map(host, i, i);
		}
		break;
	case IRQ_HOST_MAP_LINEAR:
		rmap = (unsigned int *)(host + 1);
		for (i = 0; i < revmap_arg; i++)
			rmap[i] = NO_IRQ;
		host->revmap_data.linear.size = revmap_arg;
		smp_wmb();
		host->revmap_data.linear.revmap = rmap;
		break;
	default:
		break;
	}

	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

	return host;
}

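/*
 * Minimal registration sketch (hypothetical platform PIC code; the names
 * mypic_* and the 64-entry linear size are made up for illustration):
 *
 *	static struct irq_host_ops mypic_host_ops = {
 *		.map	= mypic_host_map,	// set chip/flow handler
 *		.xlate	= mypic_host_xlate,	// decode "interrupts" cells
 *	};
 *
 *	mypic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 64,
 *				    &mypic_host_ops, 0);
 *
 * A linear revmap gives O(1) hwirq -> virq lookup for small, dense hwirq
 * spaces; IRQ_HOST_MAP_TREE suits large or sparse ones.
 */
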
struct irq_host *irq_find_host(struct device_node *node)
{
	struct irq_host *h, *found = NULL;
	unsigned long flags;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
	raw_spin_lock_irqsave(&irq_big_lock, flags);
	list_for_each_entry(h, &irq_hosts, link)
		if (h->ops->match(h, node)) {
			found = h;
			break;
		}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_host *host)
{
	pr_debug("irq: Default host set to @0x%p\n", host);

	irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
	pr_debug("irq: Trying to set virq count to %d\n", count);

	BUG_ON(count < NUM_ISA_INTERRUPTS);
	if (count < NR_IRQS)
		irq_virq_count = count;
}

static int irq_setup_virq(struct irq_host *host, unsigned int virq,
			  irq_hw_number_t hwirq)
{
	struct irq_desc *desc;

	desc = irq_to_desc_alloc_node(virq, 0);
	if (!desc) {
		pr_debug("irq: -> allocating desc failed\n");
		goto error;
	}

	/* Clear IRQ_NOREQUEST flag */
	desc->status &= ~IRQ_NOREQUEST;

	/* map it */
	smp_wmb();
	irq_map[virq].hwirq = hwirq;
	smp_mb();

	if (host->ops->map(host, virq, hwirq)) {
		pr_debug("irq: -> mapping failed, freeing\n");
		goto error;
	}

	return 0;

error:
	irq_free_virt(virq, 1);
	return -1;
}

unsigned int irq_create_direct_mapping(struct irq_host *host)
{
	unsigned int virq;

	if (host == NULL)
		host = irq_default_host;

	BUG_ON(host == NULL);
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

	virq = irq_alloc_virt(host, 1, 0);
	if (virq == NO_IRQ) {
		pr_debug("irq: create_direct virq allocation failed\n");
		return NO_IRQ;
	}

	pr_debug("irq: create_direct obtained virq %d\n", virq);

	if (irq_setup_virq(host, virq, virq))
		return NO_IRQ;

	return virq;
}

unsigned int irq_create_mapping(struct irq_host *host,
				irq_hw_number_t hwirq)
{
	unsigned int virq, hint;

	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL) {
		printk(KERN_WARNING "irq_create_mapping called for"
		       " NULL host, hwirq=%lx\n", hwirq);
		WARN_ON(1);
		return NO_IRQ;
	}
	pr_debug("irq: -> using host @%p\n", host);

	/* Check if a mapping already exists; if it does, call
	 * host->ops->map() to update the flags
	 */
	virq = irq_find_mapping(host, hwirq);
	if (virq != NO_IRQ) {
		if (host->ops->remap)
			host->ops->remap(host, virq, hwirq);
		pr_debug("irq: -> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Get a virtual interrupt number */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
		/* Handle legacy */
		virq = (unsigned int)hwirq;
		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
			return NO_IRQ;
		return virq;
	} else {
		/* Allocate a virtual interrupt number */
		hint = hwirq % irq_virq_count;
		virq = irq_alloc_virt(host, 1, hint);
		if (virq == NO_IRQ) {
			pr_debug("irq: -> virq allocation failed\n");
			return NO_IRQ;
		}
	}

	if (irq_setup_virq(host, virq, hwirq))
		return NO_IRQ;

	printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
		hwirq, host->of_node ? host->of_node->full_name : "null", virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
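
/*
 * Minimal consumer-side sketch (hypothetical driver code; mypic_host,
 * MYDEV_HWIRQ and mydev_isr are made-up names):
 *
 *	unsigned int virq = irq_create_mapping(mypic_host, MYDEV_HWIRQ);
 *
 *	if (virq == NO_IRQ)
 *		return -ENODEV;
 *	if (request_irq(virq, mydev_isr, 0, "mydev", dev))
 *		return -EBUSY;
 *
 * Drivers deal only in virqs; the raw hwirq is visible again only to the
 * irq_host and its chip methods (via irq_map[virq].hwirq).
 */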
777
Al Virof3d2ab42006-10-09 16:22:09 +0100778unsigned int irq_create_of_mapping(struct device_node *controller,
Roman Fietze40d50cf2009-12-08 02:39:50 +0000779 const u32 *intspec, unsigned int intsize)
Benjamin Herrenschmidt0ebfff12006-07-03 21:36:01 +1000780{
781 struct irq_host *host;
782 irq_hw_number_t hwirq;
Benjamin Herrenschmidt6e99e452006-07-10 04:44:42 -0700783 unsigned int type = IRQ_TYPE_NONE;
784 unsigned int virq;
Benjamin Herrenschmidt0ebfff12006-07-03 21:36:01 +1000785
786 if (controller == NULL)
787 host = irq_default_host;
788 else
789 host = irq_find_host(controller);
Benjamin Herrenschmidt6e99e452006-07-10 04:44:42 -0700790 if (host == NULL) {
791 printk(KERN_WARNING "irq: no irq host found for %s !\n",
792 controller->full_name);
Benjamin Herrenschmidt0ebfff12006-07-03 21:36:01 +1000793 return NO_IRQ;
Benjamin Herrenschmidt6e99e452006-07-10 04:44:42 -0700794 }
Benjamin Herrenschmidt0ebfff12006-07-03 21:36:01 +1000795
796 /* If host has no translation, then we assume interrupt line */
797 if (host->ops->xlate == NULL)
798 hwirq = intspec[0];
799 else {
800 if (host->ops->xlate(host, controller, intspec, intsize,
Benjamin Herrenschmidt6e99e452006-07-10 04:44:42 -0700801 &hwirq, &type))
Benjamin Herrenschmidt0ebfff12006-07-03 21:36:01 +1000802 return NO_IRQ;
803 }
804
Benjamin Herrenschmidt6e99e452006-07-10 04:44:42 -0700805 /* Create mapping */
806 virq = irq_create_mapping(host, hwirq);
807 if (virq == NO_IRQ)
808 return virq;
809
810 /* Set type if specified and different than the current one */
811 if (type != IRQ_TYPE_NONE &&
Michael Ellerman6cff46f2009-10-13 19:44:51 +0000812 type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
Benjamin Herrenschmidt6e99e452006-07-10 04:44:42 -0700813 set_irq_type(virq, type);
814 return virq;
Benjamin Herrenschmidt0ebfff12006-07-03 21:36:01 +1000815}
816EXPORT_SYMBOL_GPL(irq_create_of_mapping);
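
/*
 * Informative note: most drivers do not call irq_create_of_mapping()
 * directly.  irq_of_parse_and_map() parses the device tree "interrupts"
 * property (including interrupt-map translation) and ends up here, e.g.
 * in a probe routine (np being the device's device_node):
 *
 *	virq = irq_of_parse_and_map(np, 0);
 *	if (virq == NO_IRQ)
 *		return -EINVAL;
 */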

void irq_dispose_mapping(unsigned int virq)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;

	if (virq == NO_IRQ)
		return;

	host = irq_map[virq].host;
	WARN_ON (host == NULL);
	if (host == NULL)
		return;

	/* Never unmap legacy interrupts */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return;

	/* remove chip and handler */
	set_irq_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);

	/* Tell the PIC about it */
	if (host->ops->unmap)
		host->ops->unmap(host, virq);
	smp_mb();

	/* Clear reverse map */
	hwirq = irq_map[virq].hwirq;
	switch(host->revmap_type) {
	case IRQ_HOST_MAP_LINEAR:
		if (hwirq < host->revmap_data.linear.size)
			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
		break;
	case IRQ_HOST_MAP_TREE:
		/*
		 * Check if the radix tree is allocated yet; if not, then
		 * there is nothing to remove.
		 */
		smp_rmb();
		if (revmap_trees_allocated < 1)
			break;
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&host->revmap_data.tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
		break;
	}

	/* Destroy map */
	smp_mb();
	irq_map[virq].hwirq = host->inval_irq;

	/* Set some flags */
	irq_to_desc(virq)->status |= IRQ_NOREQUEST;

	/* Free it */
	irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

unsigned int irq_find_mapping(struct irq_host *host,
			      irq_hw_number_t hwirq)
{
	unsigned int i;
	unsigned int hint = hwirq % irq_virq_count;

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL)
		return NO_IRQ;

	/* legacy -> bail early */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return hwirq;

	/* Slow path does a linear search of the map */
	if (hint < NUM_ISA_INTERRUPTS)
		hint = NUM_ISA_INTERRUPTS;
	i = hint;
	do {
		if (irq_map[i].host == host &&
		    irq_map[i].hwirq == hwirq)
			return i;
		i++;
		if (i >= irq_virq_count)
			i = NUM_ISA_INTERRUPTS;
	} while(i != hint);
	return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);


unsigned int irq_radix_revmap_lookup(struct irq_host *host,
				     irq_hw_number_t hwirq)
{
	struct irq_map_entry *ptr;
	unsigned int virq;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists and has been initialized.
	 * If not, we fall back to slow mode.
	 */
	if (revmap_trees_allocated < 2)
		return irq_find_mapping(host, hwirq);

	/* Now try to resolve */
	/*
	 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
	 * as it's referencing an entry in the static irq_map table.
	 */
	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);

	/*
	 * If found in the radix tree, then fine.
	 * Else fall back to linear lookup - this should not happen in
	 * practice as it means that we failed to insert the node in the
	 * radix tree.
	 */
	if (ptr)
		virq = ptr - irq_map;
	else
		virq = irq_find_mapping(host, hwirq);

	return virq;
}

void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
			     irq_hw_number_t hwirq)
{

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists yet.
	 * If not, then the irq will be inserted into the tree when it gets
	 * initialized.
	 */
	smp_rmb();
	if (revmap_trees_allocated < 1)
		return;

	if (virq != NO_IRQ) {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&host->revmap_data.tree, hwirq,
				  &irq_map[virq]);
		mutex_unlock(&revmap_trees_mutex);
	}
}

unsigned int irq_linear_revmap(struct irq_host *host,
			       irq_hw_number_t hwirq)
{
	unsigned int *revmap;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);

	/* Check revmap bounds */
	if (unlikely(hwirq >= host->revmap_data.linear.size))
		return irq_find_mapping(host, hwirq);

	/* Check if revmap was allocated */
	revmap = host->revmap_data.linear.revmap;
	if (unlikely(revmap == NULL))
		return irq_find_mapping(host, hwirq);

	/* Fill up revmap with slow path if no mapping found */
	if (unlikely(revmap[hwirq] == NO_IRQ))
		revmap[hwirq] = irq_find_mapping(host, hwirq);

	return revmap[hwirq];
}

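/*
 * Cascade-handler sketch showing where the fast-path reverse mappings
 * above are meant to be used (hypothetical PIC code; mypic_get_hwirq and
 * mypic_host are made-up names):
 *
 *	static void mypic_cascade(unsigned int irq, struct irq_desc *desc)
 *	{
 *		irq_hw_number_t hwirq = mypic_get_hwirq();	// query PIC
 *		unsigned int virq = irq_linear_revmap(mypic_host, hwirq);
 *
 *		if (virq != NO_IRQ)
 *			generic_handle_irq(virq);
 *		desc->chip->eoi(irq);
 *	}
 *
 * irq_radix_revmap_lookup() plays the same role for IRQ_HOST_MAP_TREE
 * hosts; both avoid the linear scan done by irq_find_mapping().
 */
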
unsigned int irq_alloc_virt(struct irq_host *host,
			    unsigned int count,
			    unsigned int hint)
{
	unsigned long flags;
	unsigned int i, j, found = NO_IRQ;

	if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
		return NO_IRQ;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* Use hint for 1 interrupt if any */
	if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
	    hint < irq_virq_count && irq_map[hint].host == NULL) {
		found = hint;
		goto hint_found;
	}

	/* Look for count consecutive numbers in the allocatable
	 * (non-legacy) space
	 */
	for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host != NULL)
			j = 0;
		else
			j++;

		if (j == count) {
			found = i - count + 1;
			break;
		}
	}
	if (found == NO_IRQ) {
		raw_spin_unlock_irqrestore(&irq_big_lock, flags);
		return NO_IRQ;
	}
 hint_found:
	for (i = found; i < (found + count); i++) {
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = host;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}

void irq_free_virt(unsigned int virq, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	WARN_ON (virq < NUM_ISA_INTERRUPTS);
	WARN_ON (count == 0 || (virq + count) > irq_virq_count);

	raw_spin_lock_irqsave(&irq_big_lock, flags);
	for (i = virq; i < (virq + count); i++) {
		struct irq_host *host;

		if (i < NUM_ISA_INTERRUPTS ||
		    (virq + count) > irq_virq_count)
			continue;

		host = irq_map[i].host;
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = NULL;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}

int arch_early_irq_init(void)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < NR_IRQS; i++) {
		desc = irq_to_desc(i);
		if (desc)
			desc->status |= IRQ_NOREQUEST;
	}

	return 0;
}

int arch_init_chip_data(struct irq_desc *desc, int node)
{
	desc->status |= IRQ_NOREQUEST;
	return 0;
}

/* We need to create the radix trees late */
static int irq_late_init(void)
{
	struct irq_host *h;
	unsigned int i;

	/*
	 * No mutual exclusion with respect to accessors of the tree is needed
	 * here as the synchronization is done via the state variable
	 * revmap_trees_allocated.
	 */
	list_for_each_entry(h, &irq_hosts, link) {
		if (h->revmap_type == IRQ_HOST_MAP_TREE)
			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
	}

	/*
	 * Make sure the radix tree inits are visible before setting
	 * the flag
	 */
	smp_wmb();
	revmap_trees_allocated = 1;

	/*
	 * Insert the reverse mapping for those interrupts already present
	 * in irq_map[].
	 */
	mutex_lock(&revmap_trees_mutex);
	for (i = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host &&
		    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
			radix_tree_insert(&irq_map[i].host->revmap_data.tree,
					  irq_map[i].hwirq, &irq_map[i]);
	}
	mutex_unlock(&revmap_trees_mutex);

	/*
	 * Make sure the radix tree insertions are visible before setting
	 * the flag
	 */
	smp_wmb();
	revmap_trees_allocated = 2;

	return 0;
}
arch_initcall(irq_late_init);

#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	const char *p;
	char none[] = "none";
	int i;

	seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq",
		   "chip name", "host name");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		if (desc->action && desc->action->handler) {
			seq_printf(m, "%5d ", i);
			seq_printf(m, "0x%05lx ", virq_to_hw(i));

			if (desc->chip && desc->chip->name)
				p = desc->chip->name;
			else
				p = none;
			seq_printf(m, "%-15s ", p);

			if (irq_map[i].host && irq_map[i].host->of_node)
				p = irq_map[i].host->of_node->full_name;
			else
				p = none;
			seq_printf(m, "%s\n", p);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
				NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */
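
/*
 * Informative note: with CONFIG_VIRQ_DEBUG enabled, the table above can
 * be read at runtime from debugfs, typically mounted at /sys/kernel/debug
 * (powerpc_debugfs_root is the "powerpc" directory there):
 *
 *	# cat /sys/kernel/debug/powerpc/virq_mapping
 *
 * which lists virq, hwirq, chip name and host name for every requested
 * interrupt.
 */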
1195
Paul Mackerrasc6622f62006-02-24 10:06:59 +11001196#ifdef CONFIG_PPC64
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197static int __init setup_noirqdistrib(char *str)
1198{
1199 distribute_irqs = 0;
1200 return 1;
1201}
1202
1203__setup("noirqdistrib", setup_noirqdistrib);
Stephen Rothwell756e7102005-11-09 18:07:45 +11001204#endif /* CONFIG_PPC64 */