/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

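/* Each entry maps a virtual IRQ number to the device handle and
 * device INO it was allocated for.  Slot 0 is never handed out, so
 * a virtual IRQ of zero can be treated as "unallocated".
 */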
static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} irq_table[NR_IRQS];
static DEFINE_SPINLOCK(irq_alloc_lock);

unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		irq_table[ent].dev_handle = dev_handle;
		irq_table[ent].dev_ino = dev_ino;
		irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&irq_alloc_lock, flags);

	return ent;
}

#ifdef CONFIG_PCI_MSI
void irq_free(unsigned int irq)
{
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&irq_alloc_lock, flags);

	irq_table[irq].in_use = 0;

	spin_unlock_irqrestore(&irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %9s", irq_desc[i].irq_data.chip->name);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
		seq_printf(p, "     Non-maskable interrupts\n");
	}
	return 0;
}

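/* Compute the IMAP target ID (TID) bits that steer a sun4u interrupt
 * to the given cpu.  The encoding differs between Starfire, JBUS
 * (Jalapeno/Serrano), Safari, and plain UPA systems.
 */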
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

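/* Pick a target cpu for an interrupt from its affinity mask.  If the
 * mask covers every online cpu, let the cpumap code spread interrupts
 * across the machine, otherwise use the first online cpu in the mask.
 */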
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpus_equal(mask, cpu_online_map)) {
		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);
		cpuid = cpus_empty(tmp) ? map_to_cpu(irq) : first_cpu(tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(irq, affinity) \
	real_hard_smp_processor_id()
#endif

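/* sun4u interrupts are controlled by direct IMAP/ICLR register
 * accesses: point the IMAP at the chosen cpu's TID, set the Valid
 * bit, and return the ICLR state machine to idle.
 */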
static void sun4u_irq_enable(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, data->affinity);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}
}

static int sun4u_set_affinity(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, mask);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}

	return 0;
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(struct irq_data *data)
{
}

static void sun4u_irq_eoi(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;
	struct irq_desc *desc = irq_desc + data->irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(handler_data))
		upa_writeq(ICLR_IDLE, handler_data->iclr);
}

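/* On sun4v the equivalent operations go through hypervisor calls
 * keyed by the interrupt's INO rather than direct register accesses.
 */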
static void sun4v_irq_enable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	struct irq_desc *desc = irq_desc + data->irq;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

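/* Cookie-based sun4v virtual interrupts are addressed by
 * (device handle, device INO) pairs via the vintr hypervisor calls.
 */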
static void sun4v_virq_enable(struct irq_data *data)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, data->affinity);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(struct irq_data *data,
				    const struct cpumask *mask, bool force)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, mask);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(struct irq_data *data)
{
	struct irq_desc *desc = irq_desc + data->irq;
	unsigned long dev_handle, dev_ino;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.name			= "sun4u",
	.irq_enable		= sun4u_irq_enable,
	.irq_disable		= sun4u_irq_disable,
	.irq_eoi		= sun4u_irq_eoi,
	.irq_set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
	.name			= "sun4v",
	.irq_enable		= sun4v_irq_enable,
	.irq_disable		= sun4v_irq_disable,
	.irq_eoi		= sun4v_irq_eoi,
	.irq_set_affinity	= sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
	.name			= "vsun4v",
	.irq_enable		= sun4v_virq_enable,
	.irq_disable		= sun4v_virq_disable,
	.irq_eoi		= sun4v_virq_eoi,
	.irq_set_affinity	= sun4v_virt_set_affinity,
};

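/* Flow handler for IRQs that need a device-specific pre-handler run
 * before the normal fasteoi processing.
 */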
static void pre_flow_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_handler_data *handler_data = get_irq_data(irq);
	unsigned int ino = irq_table[irq].dev_ino;

	handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);

	handle_fasteoi_irq(irq, desc);
}

void irq_install_pre_handler(int irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *handler_data = get_irq_data(irq);
	struct irq_desc *desc = irq_desc + irq;

	handler_data->pre_handler = func;
	handler_data->arg1 = arg1;
	handler_data->arg2 = arg2;

	desc->handle_irq = pre_flow_handler;
}

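/* Map a sun4u (IGN|INO) interrupt source to a virtual IRQ, allocating
 * the virtual IRQ and its handler data on first use.
 */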
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, ino);
		bucket_set_irq(__pa(bucket), irq);
		set_irq_chip_and_handler_name(irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	handler_data = get_irq_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_data(irq, handler_data);

	handler_data->imap  = imap;
	handler_data->iclr  = iclr;

out:
	return irq;
}

static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, sysino);
		bucket_set_irq(__pa(bucket), irq);
		set_irq_chip_and_handler_name(irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	handler_data = get_irq_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

out:
	return irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

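/* Cookie-based virtual IRQs get a dynamically allocated ino_bucket;
 * the bucket's (inverted) physical address is handed to the
 * hypervisor as the interrupt cookie.
 */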
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *handler_data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned int irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* The only reference we store to the IRQ bucket is
	 * by physical address which kmemleak can't see, tell
	 * it that this object explicitly is not a leak and
	 * should be scanned.
	 */
	kmemleak_not_leak(bucket);

	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	irq = irq_alloc(devhandle, devino);
	bucket_set_irq(__pa(bucket), irq);

	set_irq_chip_and_handler_name(irq, &sun4v_virq,
				      handle_fasteoi_irq,
				      "IVEC");

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	desc = irq_desc + irq;
	desc->status |= IRQ_NOAUTOEN;

	set_irq_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return irq;
}

void ack_bad_irq(unsigned int irq)
{
	unsigned int ino = irq_table[irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
	       ino, irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

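/* Entry point for device interrupts: atomically grab this cpu's list
 * of pending ino_buckets and run each IRQ's flow handler on the
 * separate hard IRQ stack.
 */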
void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << pil);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		struct irq_desc *desc;
		unsigned long next_pa;
		unsigned int irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		irq = bucket_get_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		desc = irq_desc + irq;

		if (!(desc->status & IRQ_DISABLED))
			desc->handle_irq(irq, desc);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

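/* Run pending softirqs on the per-cpu softirq stack rather than the
 * current kernel stack.
 */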
void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}

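/* When a cpu goes offline, retarget every non-per-cpu IRQ that has an
 * action by replaying its affinity setting, then shut off the local
 * tick interrupt.
 */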
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			struct irq_data *data = irq_get_irq_data(irq);

			if (data->chip->irq_set_affinity)
				data->chip->irq_set_affinity(data,
							     data->affinity,
							     false);
		}
		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

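/* Allocate one page per cpu to hold the cpu mondo block (first 64
 * bytes) and the cpu list used for cross-call sends.
 */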
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = kzalloc(size, GFP_KERNEL);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries. */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}