/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"

#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_virt_irq(unsigned long bucket_pa,
				unsigned int virt_irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (virt_irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

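/* Small virtual IRQ numbers are what get handed out to drivers; this
 * table records, for each allocated number, the (dev_handle, dev_ino)
 * pair identifying the underlying interrupt source.
 */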
static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);

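/* Allocate a new virtual IRQ number.  Entry 0 is never handed out: it
 * doubles as the "no virtual IRQ" / allocation-failure value, and the
 * unsigned char return type is why NR_IRQS must stay below 256.
 */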
unsigned char virt_irq_alloc(unsigned int dev_handle,
			     unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!virt_irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		virt_irq_table[ent].dev_handle = dev_handle;
		virt_irq_table[ent].dev_ino = dev_ino;
		virt_irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

	return ent;
}

#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
	unsigned long flags;

	if (virt_irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	virt_irq_table[virt_irq].in_use = 0;

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %9s", irq_desc[i].chip->name);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
		seq_printf(p, "     Non-maskable interrupts\n");
	}
	return 0;
}

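/* The IMAP register's target ID (TID) field encodes the destination
 * CPU differently depending on the system bus: Starfire needs an
 * explicit translation, Jalapeno/Serrano parts use a JBUS ID, other
 * Cheetah parts use Safari agent/node IDs, and everything else uses
 * a plain UPA target ID.
 */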
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpus_equal(mask, cpu_online_map)) {
		cpuid = map_to_cpu(virt_irq);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);
		cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(virt_irq, affinity)	\
	real_hard_smp_processor_id()
#endif

277static void sun4u_irq_enable(unsigned int virt_irq)
278{
David S. Miller68c92182007-01-29 12:12:28 -0800279 struct irq_handler_data *data = get_irq_chip_data(virt_irq);
David S. Millere18e2a02006-06-20 01:23:32 -0700280
281 if (likely(data)) {
David S. Miller861fe902007-05-02 17:31:36 -0700282 unsigned long cpuid, imap, val;
David S. Millere18e2a02006-06-20 01:23:32 -0700283 unsigned int tid;
284
David S. Miller1091ce62010-01-20 19:30:49 -0800285 cpuid = irq_choose_cpu(virt_irq,
286 irq_desc[virt_irq].affinity);
David S. Millere18e2a02006-06-20 01:23:32 -0700287 imap = data->imap;
288
289 tid = sun4u_compute_tid(imap, cpuid);
290
David S. Miller861fe902007-05-02 17:31:36 -0700291 val = upa_readq(imap);
292 val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
293 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
294 val |= tid | IMAP_VALID;
295 upa_writeq(val, imap);
David S. Miller227c3312008-04-26 02:19:18 -0700296 upa_writeq(ICLR_IDLE, data->iclr);
David S. Millere18e2a02006-06-20 01:23:32 -0700297 }
298}
299
Yinghai Lud5dedd42009-04-27 17:59:21 -0700300static int sun4u_set_affinity(unsigned int virt_irq,
Linus Torvaldsb840d792009-01-02 11:44:09 -0800301 const struct cpumask *mask)
David S. Millerb53bcb62007-07-14 03:16:13 -0700302{
David S. Miller1091ce62010-01-20 19:30:49 -0800303 struct irq_handler_data *data = get_irq_chip_data(virt_irq);
304
305 if (likely(data)) {
306 unsigned long cpuid, imap, val;
307 unsigned int tid;
308
309 cpuid = irq_choose_cpu(virt_irq, mask);
310 imap = data->imap;
311
312 tid = sun4u_compute_tid(imap, cpuid);
313
314 val = upa_readq(imap);
315 val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
316 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
317 val |= tid | IMAP_VALID;
318 upa_writeq(val, imap);
319 upa_writeq(ICLR_IDLE, data->iclr);
320 }
Yinghai Lud5dedd42009-04-27 17:59:21 -0700321
322 return 0;
David S. Millerb53bcb62007-07-14 03:16:13 -0700323}
324
David S. Millerd0cac392009-03-04 14:43:47 -0800325/* Don't do anything. The desc->status check for IRQ_DISABLED in
326 * handler_irq() will skip the handler call and that will leave the
327 * interrupt in the sent state. The next ->enable() call will hit the
328 * ICLR register to reset the state machine.
329 *
330 * This scheme is necessary, instead of clearing the Valid bit in the
331 * IMAP register, to handle the case of IMAP registers being shared by
332 * multiple INOs (and thus ICLR registers). Since we use a different
333 * virtual IRQ for each shared IMAP instance, the generic code thinks
334 * there is only one user so it prematurely calls ->disable() on
335 * free_irq().
336 *
337 * We have to provide an explicit ->disable() method instead of using
338 * NULL to get the default. The reason is that if the generic code
339 * sees that, it also hooks up a default ->shutdown method which
340 * invokes ->mask() which we do not want. See irq_chip_set_defaults().
341 */
David S. Millere18e2a02006-06-20 01:23:32 -0700342static void sun4u_irq_disable(unsigned int virt_irq)
343{
David S. Millere18e2a02006-06-20 01:23:32 -0700344}
345
David S. Miller8d57d3a2007-10-22 02:16:45 -0700346static void sun4u_irq_eoi(unsigned int virt_irq)
David S. Millere18e2a02006-06-20 01:23:32 -0700347{
David S. Miller68c92182007-01-29 12:12:28 -0800348 struct irq_handler_data *data = get_irq_chip_data(virt_irq);
David S. Miller5a606b72007-07-09 22:40:36 -0700349 struct irq_desc *desc = irq_desc + virt_irq;
350
351 if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
352 return;
David S. Millere18e2a02006-06-20 01:23:32 -0700353
354 if (likely(data))
David S. Miller861fe902007-05-02 17:31:36 -0700355 upa_writeq(ICLR_IDLE, data->iclr);
David S. Millere18e2a02006-06-20 01:23:32 -0700356}
357
static void sun4v_irq_enable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq,
					     irq_desc[virt_irq].affinity);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(unsigned int virt_irq,
			      const struct cpumask *mask)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq, mask);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	struct irq_desc *desc = irq_desc + virt_irq;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

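/* The "virq" variants below manage cookie-based hypervisor interrupts:
 * the source is named by a (dev_handle, dev_ino) pair and is programmed
 * through the sun4v_vintr_*() hypervisor calls rather than by sysino.
 */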
static void sun4v_virq_enable(unsigned int virt_irq)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(unsigned int virt_irq,
				   const struct cpumask *mask)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq, mask);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(unsigned int virt_irq)
{
	struct irq_desc *desc = irq_desc + virt_irq;
	unsigned long dev_handle, dev_ino;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.name		= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.eoi		= sun4u_irq_eoi,
	.set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
	.name		= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.eoi		= sun4v_irq_eoi,
	.set_affinity	= sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
	.name		= "vsun4v",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.eoi		= sun4v_virq_eoi,
	.set_affinity	= sun4v_virt_set_affinity,
};

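/* Flow handler used when a driver installs a pre-handler: run the
 * device-specific pre-handler first, then fall through to the normal
 * fasteoi flow.
 */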
static void pre_flow_handler(unsigned int virt_irq,
			     struct irq_desc *desc)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	data->pre_handler(ino, data->arg1, data->arg2);

	handle_fasteoi_irq(virt_irq, desc);
}

void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	data->pre_handler = func;
	data->arg1 = arg1;
	data->arg2 = arg2;

	desc->handle_irq = pre_flow_handler;
}

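/* sun4u: read the INO out of the IMAP register, find its ino_bucket,
 * and lazily allocate a virtual IRQ plus handler data for it on first
 * use.  Subsequent calls for the same INO return the same virtual IRQ.
 */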
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, ino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	data->imap = imap;
	data->iclr = iclr;

out:
	return virt_irq;
}

static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, sysino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

out:
	return virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned int virt_irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* The only reference we store to the IRQ bucket is
	 * by physical address which kmemleak can't see, tell
	 * it that this object explicitly is not a leak and
	 * should be scanned.
	 */
	kmemleak_not_leak(bucket);

	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	virt_irq = virt_irq_alloc(devhandle, devino);
	bucket_set_virt_irq(__pa(bucket), virt_irq);

	set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
				      handle_fasteoi_irq,
				      "IVEC");

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	desc = irq_desc + virt_irq;
	desc->status |= IRQ_NOAUTOEN;

	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

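	/* The hypervisor hands this cookie back with each device mondo.
	 * It is the bucket's physical address, complemented: the set
	 * top bit is what lets the mondo trap code tell a cookie apart
	 * from a raw sysino.
	 */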
	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return virt_irq;
}

void ack_bad_irq(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

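/* Switch to this cpu's dedicated hard IRQ stack unless we are already
 * on it.  The new stack pointer sits THREAD_SIZE above the base, minus
 * 192 bytes of register window save area and ABI scratch space, and
 * minus the 64-bit ABI's stack bias (STACK_BIAS).
 */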
static __attribute__((always_inline)) void *set_hardirq_stack(void)
{
	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
	if (orig_sp < sp ||
	    orig_sp > (sp + THREAD_SIZE)) {
		sp += THREAD_SIZE - 192 - STACK_BIAS;
		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
	}

	return orig_sp;
}

static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
{
	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
}

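/* Top-level device interrupt entry: atomically detach this cpu's list
 * of pending ino_buckets (linked through their physical addresses) and
 * dispatch each one's virtual IRQ handler on the hard IRQ stack.
 */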
void __irq_entry handler_irq(int irq, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		struct irq_desc *desc;
		unsigned long next_pa;
		unsigned int virt_irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		virt_irq = bucket_get_virt_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		desc = irq_desc + virt_irq;

		if (!(desc->status & IRQ_DISABLED))
			desc->handle_irq(virt_irq, desc);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

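/* Run pending softirqs on the per-cpu softirq stack so that deep
 * softirq processing cannot overflow the interrupted task's kernel
 * stack.
 */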
void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			if (irq_desc[irq].chip->set_affinity)
				irq_desc[irq].chip->set_affinity(irq,
					irq_desc[irq].affinity);
		}
		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
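/* get_order() rounds the size up to a power-of-2 number of pages, and
 * the page allocator returns blocks naturally aligned to their size,
 * so the allocation below satisfies the alignment rule above.
 */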
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

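/* Dummy irqaction for the timer tick; it is attached to irq_desc[0]
 * at the end of init_IRQ() so that the timer shows up with a name in
 * /proc/interrupts.
 */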
static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = kzalloc(size, GFP_KERNEL);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries. */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}