/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

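/*
 * On 64-bit PowerPC, register r13 always holds the pointer to this
 * CPU's paca, so the two raw accessors below read/write single paca
 * bytes directly instead of going through get_paca(), which would
 * pull debug_smp_processor_id() checks into these very hot paths.
 */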
static inline notrace unsigned long get_irq_happened(void)
{
        unsigned long happened;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

        return happened;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
        __asm__ __volatile__("stb %0,%1(13)"
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

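/*
 * If the next timer event went by while we were hard-disabled, re-arm
 * the decrementer so it fires again almost immediately, and report
 * whether such an overflow happened.
 */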
static inline notrace int decrementer_check_overflow(void)
{
        u64 now = get_tb_or_rtc();
        u64 *next_tb = &__get_cpu_var(decrementers_next_tb);

        if (now >= *next_tb)
                set_dec(1);
        return now >= *next_tb;
}

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 0x500/0x900 if there's
 * either an EE or a DEC to generate.
 *
 * This is called in two contexts: from arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
        /*
         * We use local_paca rather than get_paca() to avoid all
         * the debug_smp_processor_id() business in this low level
         * function
         */
        unsigned char happened = local_paca->irq_happened;

        /* Clear bit 0 which we wouldn't clear otherwise */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp, tmp2;
                lv1_get_version_info(&tmp, &tmp2);
        }

        /*
         * We may have missed a decrementer interrupt. We check the
         * decrementer itself rather than the paca irq_happened field
         * in case we also had a rollover while hard disabled
         */
        local_paca->irq_happened &= ~PACA_IRQ_DEC;
        if (decrementer_check_overflow())
                return 0x900;

        /* Finally check if an external interrupt happened */
        local_paca->irq_happened &= ~PACA_IRQ_EE;
        if (happened & PACA_IRQ_EE)
                return 0x500;

#ifdef CONFIG_PPC_BOOK3E
        /* Finally check if an EPR external interrupt happened;
         * this bit is typically set if we need to handle another
         * "edge" interrupt from within the MPIC "EPR" handler
         */
        local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
        if (happened & PACA_IRQ_EE_EDGE)
                return 0x500;

        local_paca->irq_happened &= ~PACA_IRQ_DBELL;
        if (happened & PACA_IRQ_DBELL)
                return 0x280;
#endif /* CONFIG_PPC_BOOK3E */

        /* There should be nothing left! */
        BUG_ON(local_paca->irq_happened != 0);

        return 0;
}

notrace void arch_local_irq_restore(unsigned long en)
{
        unsigned char irq_happened;
        unsigned int replay;

        /* Write the new soft-enabled value */
        set_soft_enabled(en);
        if (!en)
                return;
        /*
         * From this point onward, we can take interrupts, preempt,
         * etc... unless we got hard-disabled. We check if an event
         * happened. If none happened, we know we can just return.
         *
         * We may have preempted before the check below, in which case
         * we are checking the "new" CPU instead of the old one. This
         * is only a problem if an event happened on the "old" CPU.
         *
         * External interrupt events on non-iSeries will have caused
         * interrupts to be hard-disabled, so there is no problem, we
         * cannot have preempted.
         *
         * That leaves us with EEs on iSeries or decrementer interrupts,
         * which I decided to safely ignore. The preemption would have
         * itself been the result of an interrupt, upon which return we
         * will have checked for pending events on the old CPU.
         */
        irq_happened = get_irq_happened();
        if (!irq_happened)
                return;

        /*
         * We need to hard disable to get a trusted value from
         * __check_irq_replay(). We also need to soft-disable
         * again to avoid warnings in there due to the use of
         * per-cpu variables.
         *
         * We know that if the value in irq_happened is exactly 0x01
         * then we are already hard disabled (there are other less
         * common cases that we'll ignore for now), so we skip the
         * (expensive) mtmsrd.
         */
        if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
                __hard_irq_disable();
        set_soft_enabled(0);

        /*
         * Check if anything needs to be re-emitted. We haven't
         * soft-enabled yet to avoid warnings in decrementer_check_overflow
         * accessing per-cpu variables
         */
        replay = __check_irq_replay();

        /* We can soft-enable now */
        set_soft_enabled(1);

        /*
         * And replay if we have to. This will return with interrupts
         * hard-enabled.
         */
        if (replay) {
                __replay_interrupt(replay);
                return;
        }

        /* Finally, let's ensure we are hard enabled */
        __hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
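
/*
 * A minimal sketch of how the lazy-disable scheme above is exercised
 * by generic code (illustrative, not part of this file):
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);    // fast: just clears paca->soft_enabled
 *      // ... critical section; a hardware interrupt arriving here is
 *      // recorded in paca->irq_happened and held off ...
 *      local_irq_restore(flags); // lands in arch_local_irq_restore(),
 *                                // which replays whatever arrived
 */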

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 */
void restore_interrupts(void)
{
        if (irqs_disabled())
                local_irq_enable();
}

#endif /* CONFIG_PPC64 */

int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
        if (tau_initialized) {
                seq_printf(p, "%*s: ", prec, "TAU");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", tau_interrupts(j));
                seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
        }
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
        seq_printf(p, " Local timer interrupts\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
        seq_printf(p, " Spurious interrupts\n");

        seq_printf(p, "%*s: ", prec, "CNT");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
        seq_printf(p, " Performance monitoring interrupts\n");

        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
        seq_printf(p, " Machine check exceptions\n");

        return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

        sum += per_cpu(irq_stat, cpu).pmu_irqs;
        sum += per_cpu(irq_stat, cpu).mce_exceptions;
        sum += per_cpu(irq_stat, cpu).spurious_irqs;

        return sum;
}

#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
        struct irq_desc *desc;
        unsigned int irq;
        static int warned;
        cpumask_var_t mask;
        const struct cpumask *map = cpu_online_mask;

        alloc_cpumask_var(&mask, GFP_KERNEL);

        for_each_irq(irq) {
                struct irq_data *data;
                struct irq_chip *chip;

                desc = irq_to_desc(irq);
                if (!desc)
                        continue;

                data = irq_desc_get_irq_data(desc);
                if (irqd_is_per_cpu(data))
                        continue;

                chip = irq_data_get_irq_chip(data);

                cpumask_and(mask, data->affinity, map);
                if (cpumask_any(mask) >= nr_cpu_ids) {
                        printk("Breaking affinity for irq %i\n", irq);
                        cpumask_copy(mask, map);
                }
                if (chip->irq_set_affinity)
                        chip->irq_set_affinity(data, mask, true);
                else if (desc->action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

        free_cpumask_var(mask);

        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif

static inline void handle_one_irq(unsigned int irq)
{
        struct thread_info *curtp, *irqtp;
        unsigned long saved_sp_limit;
        struct irq_desc *desc;

        desc = irq_to_desc(irq);
        if (!desc)
                return;

        /* Switch to the irq stack to handle this */
        curtp = current_thread_info();
        irqtp = hardirq_ctx[smp_processor_id()];

        if (curtp == irqtp) {
                /* We're already on the irq stack, just handle it */
                desc->handle_irq(irq, desc);
                return;
        }

        saved_sp_limit = current->thread.ksp_limit;

        irqtp->task = curtp->task;
        irqtp->flags = 0;

        /* Copy the softirq bits in preempt_count so that the
         * softirq checks work in the hardirq context. */
        irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
                (curtp->preempt_count & SOFTIRQ_MASK);

        current->thread.ksp_limit = (unsigned long)irqtp +
                _ALIGN_UP(sizeof(struct thread_info), 16);

        call_handle_irq(irq, desc, irqtp, desc->handle_irq);
        current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;

        /* Set any flag that may have been set on the
         * alternate stack
         */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);
}

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        long sp;

        sp = __get_SP() & (THREAD_SIZE-1);

        /* check for stack overflow: is there less than 2KB free? */
        if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                printk("do_IRQ: stack overflow: %ld\n",
                        sp - sizeof(struct thread_info));
                dump_stack();
        }
#endif
}

void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq;

        trace_irq_entry(regs);

        irq_enter();

        check_stack_overflow();

        /*
         * Query the platform PIC for the interrupt & ack it.
         *
         * This will typically lower the interrupt line to the CPU
         */
        irq = ppc_md.get_irq();

        /* We can hard enable interrupts now */
        may_hard_irq_enable();

        /* And finally process it */
        if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
                handle_one_irq(irq);
        else if (irq != NO_IRQ_IGNORE)
                __get_cpu_var(irq_stat).spurious_irqs++;

        irq_exit();
        set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) &&
                        get_lppaca()->int_dword.fields.decr_int) {
                get_lppaca()->int_dword.fields.decr_int = 0;
                /* Signal a fake decrementer interrupt */
                timer_interrupt(regs);
        }
#endif

        trace_irq_exit(regs);
}

void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();

        exc_lvl_ctx_init();

        irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
        struct thread_info *tp;
        int i, cpu_nr;

        for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
                cpu_nr = i;
#else
                cpu_nr = get_hard_smp_processor_id(i);
#endif
                memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = critirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
                memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = dbgirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;

                memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = mcheckirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = HARDIRQ_OFFSET;
#endif
        }
}
#endif

struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
        }
}

static inline void do_softirq_onstack(void)
{
        struct thread_info *curtp, *irqtp;
        unsigned long saved_sp_limit = current->thread.ksp_limit;

        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        irqtp->flags = 0;
        current->thread.ksp_limit = (unsigned long)irqtp +
                _ALIGN_UP(sizeof(struct thread_info), 16);
        call_do_softirq(irqtp);
        current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;

        /* Set any flag that may have been set on the
         * alternate stack
         */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);
}

void do_softirq(void)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending())
                do_softirq_onstack();

        local_irq_restore(flags);
}


/*
 * IRQ controller and virtual interrupts
 */

/* The main irq map itself is an array of NR_IRQS entries containing the
 * associated host and hwirq number. An entry with a host of NULL is free.
 * An entry can be allocated if it's free; the allocator always then sets
 * hwirq first to the host's invalid irq number and then fills ops.
 */
struct irq_map_entry {
        irq_hw_number_t hwirq;
        struct irq_host *host;
};

static LIST_HEAD(irq_hosts);
static DEFINE_RAW_SPINLOCK(irq_big_lock);
static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;
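
/*
 * Layout of the virq number space: virq 0 is never a valid interrupt
 * (NO_IRQ is 0 on powerpc), virqs 1 .. NUM_ISA_INTERRUPTS-1 are
 * reserved for a legacy (e.g. i8259) controller, and everything from
 * NUM_ISA_INTERRUPTS up to irq_virq_count is handed out by
 * irq_alloc_virt() below.
 */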

irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
        return irq_map[d->irq].hwirq;
}
EXPORT_SYMBOL_GPL(irqd_to_hwirq);

irq_hw_number_t virq_to_hw(unsigned int virq)
{
        return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

bool virq_is_host(unsigned int virq, struct irq_host *host)
{
        return irq_map[virq].host == host;
}
EXPORT_SYMBOL_GPL(virq_is_host);

static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
        return h->of_node != NULL && h->of_node == np;
}

struct irq_host *irq_alloc_host(struct device_node *of_node,
                                unsigned int revmap_type,
                                unsigned int revmap_arg,
                                struct irq_host_ops *ops,
                                irq_hw_number_t inval_irq)
{
        struct irq_host *host;
        unsigned int size = sizeof(struct irq_host);
        unsigned int i;
        unsigned int *rmap;
        unsigned long flags;

        /* Allocate structure and revmap table if using linear mapping */
        if (revmap_type == IRQ_HOST_MAP_LINEAR)
                size += revmap_arg * sizeof(unsigned int);
        host = kzalloc(size, GFP_KERNEL);
        if (host == NULL)
                return NULL;

        /* Fill structure */
        host->revmap_type = revmap_type;
        host->inval_irq = inval_irq;
        host->ops = ops;
        host->of_node = of_node_get(of_node);

        if (host->ops->match == NULL)
                host->ops->match = default_irq_host_match;

        raw_spin_lock_irqsave(&irq_big_lock, flags);

        /* If it's a legacy controller, check for duplicates and
         * mark it as allocated (we use the irq 0 host pointer for that)
         */
        if (revmap_type == IRQ_HOST_MAP_LEGACY) {
                if (irq_map[0].host != NULL) {
                        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
                        of_node_put(host->of_node);
                        kfree(host);
                        return NULL;
                }
                irq_map[0].host = host;
        }

        list_add(&host->link, &irq_hosts);
        raw_spin_unlock_irqrestore(&irq_big_lock, flags);

        /* Additional setups per revmap type */
        switch(revmap_type) {
        case IRQ_HOST_MAP_LEGACY:
                /* 0 is always the invalid number for legacy */
                host->inval_irq = 0;
                /* setup us as the host for all legacy interrupts */
                for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
                        irq_map[i].hwirq = i;
                        smp_wmb();
                        irq_map[i].host = host;
                        smp_wmb();

                        /* Legacy flags are left to default at this point,
                         * one can then use irq_create_mapping() to
                         * explicitly change them
                         */
                        ops->map(host, i, i);

                        /* Clear norequest flags */
                        irq_clear_status_flags(i, IRQ_NOREQUEST);
                }
                break;
        case IRQ_HOST_MAP_LINEAR:
                rmap = (unsigned int *)(host + 1);
                for (i = 0; i < revmap_arg; i++)
                        rmap[i] = NO_IRQ;
                host->revmap_data.linear.size = revmap_arg;
                smp_wmb();
                host->revmap_data.linear.revmap = rmap;
                break;
        case IRQ_HOST_MAP_TREE:
                INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
                break;
        default:
                break;
        }

        pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

        return host;
}
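
/*
 * A sketch of typical use (the "mypic_" names are hypothetical): a
 * platform PIC driver registers a linear host for its hardware
 * interrupt numbers at init time,
 *
 *      static struct irq_host_ops mypic_host_ops = {
 *              .map   = mypic_host_map,   // set chip/handler for a virq
 *              .xlate = mypic_host_xlate, // decode a device-tree intspec
 *      };
 *
 *      host = irq_alloc_host(pic_node, IRQ_HOST_MAP_LINEAR, 256,
 *                            &mypic_host_ops, 0);
 *
 * and then maps individual sources with irq_create_mapping() or lets
 * irq_create_of_mapping() do it from the device tree.
 */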

struct irq_host *irq_find_host(struct device_node *node)
{
        struct irq_host *h, *found = NULL;
        unsigned long flags;

        /* We might want to match the legacy controller last since
         * it might potentially be set to match all interrupts in
         * the absence of a device node. This isn't a problem yet,
         * though...
         */
        raw_spin_lock_irqsave(&irq_big_lock, flags);
        list_for_each_entry(h, &irq_hosts, link)
                if (h->ops->match(h, node)) {
                        found = h;
                        break;
                }
        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_host *host)
{
        pr_debug("irq: Default host set to @0x%p\n", host);

        irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
        pr_debug("irq: Trying to set virq count to %d\n", count);

        BUG_ON(count < NUM_ISA_INTERRUPTS);
        if (count < NR_IRQS)
                irq_virq_count = count;
}

static int irq_setup_virq(struct irq_host *host, unsigned int virq,
                          irq_hw_number_t hwirq)
{
        int res;

        res = irq_alloc_desc_at(virq, 0);
        if (res != virq) {
                pr_debug("irq: -> allocating desc failed\n");
                goto error;
        }

        /* map it */
        smp_wmb();
        irq_map[virq].hwirq = hwirq;
        smp_mb();

        if (host->ops->map(host, virq, hwirq)) {
                pr_debug("irq: -> mapping failed, freeing\n");
                goto errdesc;
        }

        irq_clear_status_flags(virq, IRQ_NOREQUEST);

        return 0;

errdesc:
        irq_free_descs(virq, 1);
error:
        irq_free_virt(virq, 1);
        return -1;
}

unsigned int irq_create_direct_mapping(struct irq_host *host)
{
        unsigned int virq;

        if (host == NULL)
                host = irq_default_host;

        BUG_ON(host == NULL);
        WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

        virq = irq_alloc_virt(host, 1, 0);
        if (virq == NO_IRQ) {
                pr_debug("irq: create_direct virq allocation failed\n");
                return NO_IRQ;
        }

        pr_debug("irq: create_direct obtained virq %d\n", virq);

        if (irq_setup_virq(host, virq, virq))
                return NO_IRQ;

        return virq;
}

unsigned int irq_create_mapping(struct irq_host *host,
                                irq_hw_number_t hwirq)
{
        unsigned int virq, hint;

        pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

        /* Look for default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL) {
                printk(KERN_WARNING "irq_create_mapping called for"
                       " NULL host, hwirq=%lx\n", hwirq);
                WARN_ON(1);
                return NO_IRQ;
        }
        pr_debug("irq: -> using host @%p\n", host);

        /* Check if mapping already exists */
        virq = irq_find_mapping(host, hwirq);
        if (virq != NO_IRQ) {
                pr_debug("irq: -> existing mapping on virq %d\n", virq);
                return virq;
        }

        /* Get a virtual interrupt number */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
                /* Handle legacy */
                virq = (unsigned int)hwirq;
                if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
                        return NO_IRQ;
                return virq;
        } else {
                /* Allocate a virtual interrupt number */
                hint = hwirq % irq_virq_count;
                virq = irq_alloc_virt(host, 1, hint);
                if (virq == NO_IRQ) {
                        pr_debug("irq: -> virq allocation failed\n");
                        return NO_IRQ;
                }
        }

        if (irq_setup_virq(host, virq, hwirq))
                return NO_IRQ;

        pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
                hwirq, host->of_node ? host->of_node->full_name : "null", virq);

        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
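
/*
 * A sketch of typical driver-side use (handler and cookie names are
 * hypothetical): once the hwirq on a given host is known,
 *
 *      virq = irq_create_mapping(host, hwirq);
 *      if (virq != NO_IRQ)
 *              rc = request_irq(virq, mydev_interrupt, 0, "mydev", mydev);
 *
 * i.e. drivers always request the Linux virq, never the raw hwirq.
 */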

unsigned int irq_create_of_mapping(struct device_node *controller,
                                   const u32 *intspec, unsigned int intsize)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        unsigned int virq;

        if (controller == NULL)
                host = irq_default_host;
        else
                host = irq_find_host(controller);
        if (host == NULL) {
                printk(KERN_WARNING "irq: no irq host found for %s !\n",
                       controller->full_name);
                return NO_IRQ;
        }

        /* If host has no translation, then we assume interrupt line */
        if (host->ops->xlate == NULL)
                hwirq = intspec[0];
        else {
                if (host->ops->xlate(host, controller, intspec, intsize,
                                     &hwirq, &type))
                        return NO_IRQ;
        }

        /* Create mapping */
        virq = irq_create_mapping(host, hwirq);
        if (virq == NO_IRQ)
                return virq;

        /* Set type if specified and different than the current one */
        if (type != IRQ_TYPE_NONE &&
            type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
                irq_set_irq_type(virq, type);
        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
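
/*
 * Most callers don't use this directly: irq_of_parse_and_map() resolves
 * a device node's "interrupts" property into (controller, intspec,
 * intsize) and calls irq_create_of_mapping() itself, so a driver
 * usually just does e.g.
 *
 *      virq = irq_of_parse_and_map(dev->dev.of_node, 0);
 */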

void irq_dispose_mapping(unsigned int virq)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;

        if (virq == NO_IRQ)
                return;

        host = irq_map[virq].host;
        if (WARN_ON(host == NULL))
                return;

        /* Never unmap legacy interrupts */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return;

        irq_set_status_flags(virq, IRQ_NOREQUEST);

        /* remove chip and handler */
        irq_set_chip_and_handler(virq, NULL, NULL);

        /* Make sure it's completed */
        synchronize_irq(virq);

        /* Tell the PIC about it */
        if (host->ops->unmap)
                host->ops->unmap(host, virq);
        smp_mb();

        /* Clear reverse map */
        hwirq = irq_map[virq].hwirq;
        switch(host->revmap_type) {
        case IRQ_HOST_MAP_LINEAR:
                if (hwirq < host->revmap_data.linear.size)
                        host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
                break;
        case IRQ_HOST_MAP_TREE:
                mutex_lock(&revmap_trees_mutex);
                radix_tree_delete(&host->revmap_data.tree, hwirq);
                mutex_unlock(&revmap_trees_mutex);
                break;
        }

        /* Destroy map */
        smp_mb();
        irq_map[virq].hwirq = host->inval_irq;

        irq_free_descs(virq, 1);
        /* Free it */
        irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

unsigned int irq_find_mapping(struct irq_host *host,
                              irq_hw_number_t hwirq)
{
        unsigned int i;
        unsigned int hint = hwirq % irq_virq_count;

        /* Look for default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL)
                return NO_IRQ;

        /* legacy -> bail early */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return hwirq;

        /* Slow path does a linear search of the map */
        if (hint < NUM_ISA_INTERRUPTS)
                hint = NUM_ISA_INTERRUPTS;
        i = hint;
        do {
                if (irq_map[i].host == host &&
                    irq_map[i].hwirq == hwirq)
                        return i;
                i++;
                if (i >= irq_virq_count)
                        i = NUM_ISA_INTERRUPTS;
        } while(i != hint);
        return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
979
Stuart Yoder6ec36b52011-05-19 08:54:26 -0500980#ifdef CONFIG_SMP
981int irq_choose_cpu(const struct cpumask *mask)
982{
983 int cpuid;
984
985 if (cpumask_equal(mask, cpu_all_mask)) {
986 static int irq_rover;
987 static DEFINE_RAW_SPINLOCK(irq_rover_lock);
988 unsigned long flags;
989
990 /* Round-robin distribution... */
991do_round_robin:
992 raw_spin_lock_irqsave(&irq_rover_lock, flags);
993
994 irq_rover = cpumask_next(irq_rover, cpu_online_mask);
995 if (irq_rover >= nr_cpu_ids)
996 irq_rover = cpumask_first(cpu_online_mask);
997
998 cpuid = irq_rover;
999
1000 raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
1001 } else {
1002 cpuid = cpumask_first_and(mask, cpu_online_mask);
1003 if (cpuid >= nr_cpu_ids)
1004 goto do_round_robin;
1005 }
1006
1007 return get_hard_smp_processor_id(cpuid);
1008}
1009#else
1010int irq_choose_cpu(const struct cpumask *mask)
1011{
1012 return hard_smp_processor_id();
1013}
1014#endif

unsigned int irq_radix_revmap_lookup(struct irq_host *host,
                                     irq_hw_number_t hwirq)
{
        struct irq_map_entry *ptr;
        unsigned int virq;

        if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
                return irq_find_mapping(host, hwirq);

        /*
         * The ptr returned references the static global irq_map,
         * but freeing an irq can delete nodes along the path to
         * do the lookup via call_rcu, hence the RCU read-side lock.
         */
        rcu_read_lock();
        ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
        rcu_read_unlock();

        /*
         * If found in radix tree, then fine.
         * Else fallback to linear lookup - this should not happen in practice
         * as it means that we failed to insert the node in the radix tree.
         */
        if (ptr)
                virq = ptr - irq_map;
        else
                virq = irq_find_mapping(host, hwirq);

        return virq;
}

void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
                             irq_hw_number_t hwirq)
{
        if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
                return;

        if (virq != NO_IRQ) {
                mutex_lock(&revmap_trees_mutex);
                radix_tree_insert(&host->revmap_data.tree, hwirq,
                                  &irq_map[virq]);
                mutex_unlock(&revmap_trees_mutex);
        }
}

unsigned int irq_linear_revmap(struct irq_host *host,
                               irq_hw_number_t hwirq)
{
        unsigned int *revmap;

        if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
                return irq_find_mapping(host, hwirq);

        /* Check revmap bounds */
        if (unlikely(hwirq >= host->revmap_data.linear.size))
                return irq_find_mapping(host, hwirq);

        /* Check if revmap was allocated */
        revmap = host->revmap_data.linear.revmap;
        if (unlikely(revmap == NULL))
                return irq_find_mapping(host, hwirq);

        /* Fill up revmap with slow path if no mapping found */
        if (unlikely(revmap[hwirq] == NO_IRQ))
                revmap[hwirq] = irq_find_mapping(host, hwirq);

        return revmap[hwirq];
}
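
/*
 * This is the fast reverse-map path intended for interrupt time, e.g.
 * in a cascade handler (a sketch; the register read is hypothetical):
 *
 *      hwirq = in_be32(pic_regs + PIC_ACK);   // ack, get hw number
 *      virq = irq_linear_revmap(cascade_host, hwirq);
 *      if (virq != NO_IRQ)
 *              generic_handle_irq(virq);
 */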

unsigned int irq_alloc_virt(struct irq_host *host,
                            unsigned int count,
                            unsigned int hint)
{
        unsigned long flags;
        unsigned int i, j, found = NO_IRQ;

        if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
                return NO_IRQ;

        raw_spin_lock_irqsave(&irq_big_lock, flags);

        /* Use hint for 1 interrupt if any */
        if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
            hint < irq_virq_count && irq_map[hint].host == NULL) {
                found = hint;
                goto hint_found;
        }

        /* Look for count consecutive numbers in the allocatable
         * (non-legacy) space
         */
        for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
                if (irq_map[i].host != NULL)
                        j = 0;
                else
                        j++;

                if (j == count) {
                        found = i - count + 1;
                        break;
                }
        }
        if (found == NO_IRQ) {
                raw_spin_unlock_irqrestore(&irq_big_lock, flags);
                return NO_IRQ;
        }
 hint_found:
        for (i = found; i < (found + count); i++) {
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
                irq_map[i].host = host;
        }
        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}

void irq_free_virt(unsigned int virq, unsigned int count)
{
        unsigned long flags;
        unsigned int i;

        WARN_ON(virq < NUM_ISA_INTERRUPTS);
        WARN_ON(count == 0 || (virq + count) > irq_virq_count);

        if (virq < NUM_ISA_INTERRUPTS) {
                if (virq + count < NUM_ISA_INTERRUPTS)
                        return;
                count -= NUM_ISA_INTERRUPTS - virq;
                virq = NUM_ISA_INTERRUPTS;
        }

        if (count > irq_virq_count || virq > irq_virq_count - count) {
                if (virq > irq_virq_count)
                        return;
                count = irq_virq_count - virq;
        }

        raw_spin_lock_irqsave(&irq_big_lock, flags);
        for (i = virq; i < (virq + count); i++) {
                struct irq_host *host;

                host = irq_map[i].host;
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
                irq_map[i].host = NULL;
        }
        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}

int arch_early_irq_init(void)
{
        return 0;
}

#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
        unsigned long flags;
        struct irq_desc *desc;
        const char *p;
        static const char none[] = "none";
        void *data;
        int i;

        seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
                   "chip name", "chip data", "host name");

        for (i = 1; i < nr_irqs; i++) {
                desc = irq_to_desc(i);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                if (desc->action && desc->action->handler) {
                        struct irq_chip *chip;

                        seq_printf(m, "%5d ", i);
                        seq_printf(m, "0x%05lx ", irq_map[i].hwirq);

                        chip = irq_desc_get_chip(desc);
                        if (chip && chip->name)
                                p = chip->name;
                        else
                                p = none;
                        seq_printf(m, "%-15s ", p);

                        data = irq_desc_get_chip_data(desc);
                        seq_printf(m, "0x%16p ", data);

                        if (irq_map[i].host && irq_map[i].host->of_node)
                                p = irq_map[i].host->of_node->full_name;
                        else
                                p = none;
                        seq_printf(m, "%s\n", p);
                }

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }

        return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
        .open = virq_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init irq_debugfs_init(void)
{
        if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
                                NULL, &virq_debug_fops) == NULL)
                return -ENOMEM;

        return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */