/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

#ifndef CONFIG_SPARSE_IRQ
EXPORT_SYMBOL(irq_desc);
#endif

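/*
 * Descriptive note (added): when non-zero (the default), platform interrupt
 * controller code may spread device interrupts across CPUs; the
 * "noirqdistrib" boot option handled by setup_noirqdistrib() at the end of
 * this file clears it.
 */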
int distribute_irqs = 1;

static inline notrace unsigned long get_hard_enabled(void)
{
	unsigned long enabled;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

	return enabled;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

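/*
 * Descriptive note (added): on 64-bit, local_irq_disable() only clears the
 * soft_enabled byte in the PACA (reached via r13) and leaves MSR[EE] alone;
 * hard_enabled records whether external interrupts are really enabled.  The
 * restore path below first re-sets soft_enabled, then replays anything that
 * arrived while we were soft-disabled (iSeries events, pending perf events,
 * an expired decrementer, PS3 pending interrupts) before hard-enabling.
 */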
notrace void raw_local_irq_restore(unsigned long en)
{
	/*
	 * get_paca()->soft_enabled = en;
	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
	 * That was allowed before, and in such a case we do need to take care
	 * that gcc will set soft_enabled directly via r13, not choose to use
	 * an intermediate register, lest we're preempted to a different cpu.
	 */
	set_soft_enabled(en);
	if (!en)
		return;

#ifdef CONFIG_PPC_STD_MMU_64
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		/*
		 * Do we need to disable preemption here?  Not really: in the
		 * unlikely event that we're preempted to a different cpu in
		 * between getting r13, loading its lppaca_ptr, and loading
		 * its any_int, we might call iseries_handle_interrupts without
		 * an interrupt pending on the new cpu, but that's no disaster,
		 * is it?  And the business of preempting us off the old cpu
		 * would itself involve a local_irq_restore which handles the
		 * interrupt to that cpu.
		 *
		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
		 * to avoid any preemption checking added into get_paca().
		 */
		if (local_paca->lppaca_ptr->int_dword.any_int)
			iseries_handle_interrupts();
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	if (test_perf_event_pending()) {
		clear_perf_event_pending();
		perf_event_do_pending();
	}

	/*
	 * if (get_paca()->hard_enabled) return;
	 * But again we need to take care that gcc gets hard_enabled directly
	 * via r13, not choose to use an intermediate register, lest we're
	 * preempted to a different cpu in between the two instructions.
	 */
	if (get_hard_enabled())
		return;

	/*
	 * Need to hard-enable interrupts here.  Since currently disabled,
	 * no need to take further asm precautions against preemption; but
	 * use local_paca instead of get_paca() to avoid preemption checking.
	 */
	local_paca->hard_enabled = en;
	if ((int)mfspr(SPRN_DEC) < 0)
		mtspr(SPRN_DEC, 1);

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp;
		lv1_get_version_info(&tmp);
	}

	__hard_irq_enable();
}
EXPORT_SYMBOL(raw_local_irq_restore);
#endif /* CONFIG_PPC64 */

static int show_other_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "CNT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

	return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j, prec;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > nr_irqs)
		return 0;

	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
		j *= 10;

	if (i == nr_irqs)
		return show_other_interrupts(p, prec);

	/* print header */
	if (i == 0) {
		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	desc = irq_to_desc(i);
	if (!desc)
		return 0;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

	if (desc->chip)
		seq_printf(p, "  %-16s", desc->chip->name);
	else
		seq_printf(p, "  %-16s", "None");
	seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");

	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;

	return sum;
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(const struct cpumask *map)
{
	struct irq_desc *desc;
	unsigned int irq;
	static int warned;
	cpumask_var_t mask;

	alloc_cpumask_var(&mask, GFP_KERNEL);

	for_each_irq(irq) {
		desc = irq_to_desc(irq);
		if (desc && desc->status & IRQ_PER_CPU)
			continue;

		cpumask_and(mask, desc->affinity, map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (desc->chip->set_affinity)
			desc->chip->set_affinity(irq, mask);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

#ifdef CONFIG_IRQSTACKS
static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		generic_handle_irq(irq);
		return;
	}

	desc = irq_to_desc(irq);
	saved_sp_limit = current->thread.ksp_limit;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
			       (curtp->preempt_count & SOFTIRQ_MASK);

	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
	generic_handle_irq(irq);
}
#endif

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	trace_irq_entry(regs);

	irq_enter();

	check_stack_overflow();

	irq = ppc_md.get_irq();

	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
		handle_one_irq(irq);
	else if (irq != NO_IRQ_IGNORE)
		__get_cpu_var(irq_stat).spurious_irqs++;

	irq_exit();
	set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
			get_lppaca()->int_dword.fields.decr_int) {
		get_lppaca()->int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif

	trace_irq_exit(regs);
}

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
		tp = critirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
		tp = dbgirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;
}

#else
#define do_softirq_onstack()	__do_softirq()
#endif /* CONFIG_IRQSTACKS */

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}


/*
 * IRQ controller and virtual interrupts
 */
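/*
 * Descriptive note (added): each interrupt controller registers an irq_host
 * describing how its hardware interrupt numbers (hwirqs) translate into
 * Linux virtual irq numbers (virqs).  The global irq_map[] array records the
 * virq -> (host, hwirq) association, and every host keeps a reverse map of
 * one of the types used below: LEGACY (the ISA range mapped 1:1), LINEAR (a
 * flat array), TREE (a radix tree for sparse hwirq spaces) or NOMAP (no
 * reverse mapping at all).
 */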

static LIST_HEAD(irq_hosts);
static DEFINE_RAW_SPINLOCK(irq_big_lock);
static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
	return h->of_node != NULL && h->of_node == np;
}

struct irq_host *irq_alloc_host(struct device_node *of_node,
				unsigned int revmap_type,
				unsigned int revmap_arg,
				struct irq_host_ops *ops,
				irq_hw_number_t inval_irq)
{
	struct irq_host *host;
	unsigned int size = sizeof(struct irq_host);
	unsigned int i;
	unsigned int *rmap;
	unsigned long flags;

	/* Allocate structure and revmap table if using linear mapping */
	if (revmap_type == IRQ_HOST_MAP_LINEAR)
		size += revmap_arg * sizeof(unsigned int);
	host = zalloc_maybe_bootmem(size, GFP_KERNEL);
	if (host == NULL)
		return NULL;

	/* Fill structure */
	host->revmap_type = revmap_type;
	host->inval_irq = inval_irq;
	host->ops = ops;
	host->of_node = of_node_get(of_node);

	if (host->ops->match == NULL)
		host->ops->match = default_irq_host_match;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* If it's a legacy controller, check for duplicates and
	 * mark it as allocated (we use the irq 0 host pointer for that)
	 */
	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
		if (irq_map[0].host != NULL) {
			raw_spin_unlock_irqrestore(&irq_big_lock, flags);
			/* If we are early boot, we can't free the structure,
			 * too bad...
			 * this will be fixed once slab is made available early
			 * instead of the current cruft
			 */
			if (mem_init_done)
				kfree(host);
			return NULL;
		}
		irq_map[0].host = host;
	}

	list_add(&host->link, &irq_hosts);
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);

	/* Additional setups per revmap type */
	switch(revmap_type) {
	case IRQ_HOST_MAP_LEGACY:
		/* 0 is always the invalid number for legacy */
		host->inval_irq = 0;
		/* set us up as the host for all legacy interrupts */
		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
			irq_map[i].hwirq = i;
			smp_wmb();
			irq_map[i].host = host;
			smp_wmb();

			/* Clear norequest flags */
			irq_to_desc(i)->status &= ~IRQ_NOREQUEST;

			/* Legacy flags are left to default at this point,
			 * one can then use irq_create_mapping() to
			 * explicitly change them
			 */
			ops->map(host, i, i);
		}
		break;
	case IRQ_HOST_MAP_LINEAR:
		rmap = (unsigned int *)(host + 1);
		for (i = 0; i < revmap_arg; i++)
			rmap[i] = NO_IRQ;
		host->revmap_data.linear.size = revmap_arg;
		smp_wmb();
		host->revmap_data.linear.revmap = rmap;
		break;
	default:
		break;
	}

	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

	return host;
}

struct irq_host *irq_find_host(struct device_node *node)
{
	struct irq_host *h, *found = NULL;
	unsigned long flags;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
	raw_spin_lock_irqsave(&irq_big_lock, flags);
	list_for_each_entry(h, &irq_hosts, link)
		if (h->ops->match(h, node)) {
			found = h;
			break;
		}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_host *host)
{
	pr_debug("irq: Default host set to @0x%p\n", host);

	irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
	pr_debug("irq: Trying to set virq count to %d\n", count);

	BUG_ON(count < NUM_ISA_INTERRUPTS);
	if (count < NR_IRQS)
		irq_virq_count = count;
}

static int irq_setup_virq(struct irq_host *host, unsigned int virq,
			  irq_hw_number_t hwirq)
{
	struct irq_desc *desc;

	desc = irq_to_desc_alloc_node(virq, 0);
	if (!desc) {
		pr_debug("irq: -> allocating desc failed\n");
		goto error;
	}

	/* Clear IRQ_NOREQUEST flag */
	desc->status &= ~IRQ_NOREQUEST;

	/* map it */
	smp_wmb();
	irq_map[virq].hwirq = hwirq;
	smp_mb();

	if (host->ops->map(host, virq, hwirq)) {
		pr_debug("irq: -> mapping failed, freeing\n");
		goto error;
	}

	return 0;

error:
	irq_free_virt(virq, 1);
	return -1;
}

703{
704 unsigned int virq;
705
706 if (host == NULL)
707 host = irq_default_host;
708
709 BUG_ON(host == NULL);
710 WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
711
712 virq = irq_alloc_virt(host, 1, 0);
713 if (virq == NO_IRQ) {
714 pr_debug("irq: create_direct virq allocation failed\n");
715 return NO_IRQ;
716 }
717
718 pr_debug("irq: create_direct obtained virq %d\n", virq);
719
720 if (irq_setup_virq(host, virq, virq))
721 return NO_IRQ;
722
723 return virq;
724}
725
unsigned int irq_create_mapping(struct irq_host *host,
				irq_hw_number_t hwirq)
{
	unsigned int virq, hint;

	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL) {
		printk(KERN_WARNING "irq_create_mapping called for"
		       " NULL host, hwirq=%lx\n", hwirq);
		WARN_ON(1);
		return NO_IRQ;
	}
	pr_debug("irq: -> using host @%p\n", host);

	/* Check if mapping already exists; if it does, call
	 * host->ops->map() to update the flags
	 */
	virq = irq_find_mapping(host, hwirq);
	if (virq != NO_IRQ) {
		if (host->ops->remap)
			host->ops->remap(host, virq, hwirq);
		pr_debug("irq: -> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Get a virtual interrupt number */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
		/* Handle legacy */
		virq = (unsigned int)hwirq;
		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
			return NO_IRQ;
		return virq;
	} else {
		/* Allocate a virtual interrupt number */
		hint = hwirq % irq_virq_count;
		virq = irq_alloc_virt(host, 1, hint);
		if (virq == NO_IRQ) {
			pr_debug("irq: -> virq allocation failed\n");
			return NO_IRQ;
		}
	}

	if (irq_setup_virq(host, virq, hwirq))
		return NO_IRQ;

	printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
		hwirq, host->of_node ? host->of_node->full_name : "null", virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);

unsigned int irq_create_of_mapping(struct device_node *controller,
				   const u32 *intspec, unsigned int intsize)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	if (controller == NULL)
		host = irq_default_host;
	else
		host = irq_find_host(controller);
	if (host == NULL) {
		printk(KERN_WARNING "irq: no irq host found for %s !\n",
		       controller->full_name);
		return NO_IRQ;
	}

	/* If host has no translation, then we assume interrupt line */
	if (host->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (host->ops->xlate(host, controller, intspec, intsize,
				     &hwirq, &type))
			return NO_IRQ;
	}

	/* Create mapping */
	virq = irq_create_mapping(host, hwirq);
	if (virq == NO_IRQ)
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
		set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
	struct of_irq oirq;

	if (of_irq_map_one(dev, index, &oirq))
		return NO_IRQ;

	return irq_create_of_mapping(oirq.controller, oirq.specifier,
				     oirq.size);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
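
/*
 * Usage note (added): a typical driver call sequence, shown as an
 * illustrative sketch only -- "np", "my_handler" and "my_dev" are
 * placeholder names, not symbols defined in this file.  The device-tree
 * interrupt specifier is translated to a virq, which is then requested
 * like any other Linux interrupt:
 *
 *	unsigned int virq = irq_of_parse_and_map(np, 0);
 *	if (virq == NO_IRQ)
 *		return -ENODEV;
 *	if (request_irq(virq, my_handler, 0, "my_dev", my_dev))
 *		return -EBUSY;
 */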

void irq_dispose_mapping(unsigned int virq)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;

	if (virq == NO_IRQ)
		return;

	host = irq_map[virq].host;
	WARN_ON (host == NULL);
	if (host == NULL)
		return;

	/* Never unmap legacy interrupts */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return;

	/* remove chip and handler */
	set_irq_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);

	/* Tell the PIC about it */
	if (host->ops->unmap)
		host->ops->unmap(host, virq);
	smp_mb();

	/* Clear reverse map */
	hwirq = irq_map[virq].hwirq;
	switch(host->revmap_type) {
	case IRQ_HOST_MAP_LINEAR:
		if (hwirq < host->revmap_data.linear.size)
			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
		break;
	case IRQ_HOST_MAP_TREE:
		/*
		 * Check if radix tree allocated yet, if not then nothing to
		 * remove.
		 */
		smp_rmb();
		if (revmap_trees_allocated < 1)
			break;
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&host->revmap_data.tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
		break;
	}

	/* Destroy map */
	smp_mb();
	irq_map[virq].hwirq = host->inval_irq;

	/* Set some flags */
	irq_to_desc(virq)->status |= IRQ_NOREQUEST;

	/* Free it */
	irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

unsigned int irq_find_mapping(struct irq_host *host,
			      irq_hw_number_t hwirq)
{
	unsigned int i;
	unsigned int hint = hwirq % irq_virq_count;

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL)
		return NO_IRQ;

	/* legacy -> bail early */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return hwirq;

	/* Slow path does a linear search of the map */
	if (hint < NUM_ISA_INTERRUPTS)
		hint = NUM_ISA_INTERRUPTS;
	i = hint;
	do {
		if (irq_map[i].host == host &&
		    irq_map[i].hwirq == hwirq)
			return i;
		i++;
		if (i >= irq_virq_count)
			i = NUM_ISA_INTERRUPTS;
	} while(i != hint);
	return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);


unsigned int irq_radix_revmap_lookup(struct irq_host *host,
				     irq_hw_number_t hwirq)
{
	struct irq_map_entry *ptr;
	unsigned int virq;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists and has been initialized.
	 * If not, we fall back to slow mode
	 */
	if (revmap_trees_allocated < 2)
		return irq_find_mapping(host, hwirq);

	/* Now try to resolve */
	/*
	 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
	 * as it's referencing an entry in the static irq_map table.
	 */
	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);

	/*
	 * If found in radix tree, then fine.
	 * Else fall back to linear lookup - this should not happen in practice
	 * as it means that we failed to insert the node in the radix tree.
	 */
	if (ptr)
		virq = ptr - irq_map;
	else
		virq = irq_find_mapping(host, hwirq);

	return virq;
}

void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
			     irq_hw_number_t hwirq)
{

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists yet.
	 * If not, then the irq will be inserted into the tree when it gets
	 * initialized.
	 */
	smp_rmb();
	if (revmap_trees_allocated < 1)
		return;

	if (virq != NO_IRQ) {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&host->revmap_data.tree, hwirq,
				  &irq_map[virq]);
		mutex_unlock(&revmap_trees_mutex);
	}
}

unsigned int irq_linear_revmap(struct irq_host *host,
			       irq_hw_number_t hwirq)
{
	unsigned int *revmap;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);

	/* Check revmap bounds */
	if (unlikely(hwirq >= host->revmap_data.linear.size))
		return irq_find_mapping(host, hwirq);

	/* Check if revmap was allocated */
	revmap = host->revmap_data.linear.revmap;
	if (unlikely(revmap == NULL))
		return irq_find_mapping(host, hwirq);

	/* Fill up revmap with slow path if no mapping found */
	if (unlikely(revmap[hwirq] == NO_IRQ))
		revmap[hwirq] = irq_find_mapping(host, hwirq);

	return revmap[hwirq];
}

unsigned int irq_alloc_virt(struct irq_host *host,
			    unsigned int count,
			    unsigned int hint)
{
	unsigned long flags;
	unsigned int i, j, found = NO_IRQ;

	if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
		return NO_IRQ;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* Use hint for 1 interrupt if any */
	if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
	    hint < irq_virq_count && irq_map[hint].host == NULL) {
		found = hint;
		goto hint_found;
	}

	/* Look for count consecutive numbers in the allocatable
	 * (non-legacy) space
	 */
	for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host != NULL)
			j = 0;
		else
			j++;

		if (j == count) {
			found = i - count + 1;
			break;
		}
	}
	if (found == NO_IRQ) {
		raw_spin_unlock_irqrestore(&irq_big_lock, flags);
		return NO_IRQ;
	}
 hint_found:
	for (i = found; i < (found + count); i++) {
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = host;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}

void irq_free_virt(unsigned int virq, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	WARN_ON (virq < NUM_ISA_INTERRUPTS);
	WARN_ON (count == 0 || (virq + count) > irq_virq_count);

	raw_spin_lock_irqsave(&irq_big_lock, flags);
	for (i = virq; i < (virq + count); i++) {
		struct irq_host *host;

		if (i < NUM_ISA_INTERRUPTS ||
		    (virq + count) > irq_virq_count)
			continue;

		host = irq_map[i].host;
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = NULL;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}

int arch_early_irq_init(void)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < NR_IRQS; i++) {
		desc = irq_to_desc(i);
		if (desc)
			desc->status |= IRQ_NOREQUEST;
	}

	return 0;
}

int arch_init_chip_data(struct irq_desc *desc, int node)
{
	desc->status |= IRQ_NOREQUEST;
	return 0;
}

/* We need to create the radix trees late */
static int irq_late_init(void)
{
	struct irq_host *h;
	unsigned int i;

	/*
	 * No mutual exclusion with respect to accessors of the tree is needed
	 * here as the synchronization is done via the state variable
	 * revmap_trees_allocated.
	 */
	list_for_each_entry(h, &irq_hosts, link) {
		if (h->revmap_type == IRQ_HOST_MAP_TREE)
			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
	}

	/*
	 * Make sure the radix trees inits are visible before setting
	 * the flag
	 */
	smp_wmb();
	revmap_trees_allocated = 1;

	/*
	 * Insert the reverse mapping for those interrupts already present
	 * in irq_map[].
	 */
	mutex_lock(&revmap_trees_mutex);
	for (i = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host &&
		    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
			radix_tree_insert(&irq_map[i].host->revmap_data.tree,
					  irq_map[i].hwirq, &irq_map[i]);
	}
	mutex_unlock(&revmap_trees_mutex);

	/*
	 * Make sure the radix trees insertions are visible before setting
	 * the flag
	 */
	smp_wmb();
	revmap_trees_allocated = 2;

	return 0;
}
arch_initcall(irq_late_init);

#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	const char *p;
	char none[] = "none";
	int i;

	seq_printf(m, "%-5s  %-7s  %-15s  %s\n", "virq", "hwirq",
		   "chip name", "host name");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		if (desc->action && desc->action->handler) {
			seq_printf(m, "%5d  ", i);
			seq_printf(m, "0x%05lx  ", virq_to_hw(i));

			if (desc->chip && desc->chip->name)
				p = desc->chip->name;
			else
				p = none;
			seq_printf(m, "%-15s  ", p);

			if (irq_map[i].host && irq_map[i].host->of_node)
				p = irq_map[i].host->of_node->full_name;
			else
				p = none;
			seq_printf(m, "%s\n", p);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
				NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */