/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_virt_irq(unsigned long bucket_pa,
				unsigned int virt_irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (virt_irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

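/* Table mapping each allocated virtual IRQ number back to the
 * (dev_handle, dev_ino) pair it was created for.  Slot 0 is never
 * handed out, so a virtual IRQ of zero means "not allocated".
 */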
static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);

unsigned char virt_irq_alloc(unsigned int dev_handle,
			     unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!virt_irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		virt_irq_table[ent].dev_handle = dev_handle;
		virt_irq_table[ent].dev_ino = dev_ino;
		virt_irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

	return ent;
}

#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
	unsigned long flags;

	if (virt_irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	virt_irq_table[virt_irq].in_use = 0;

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %9s", irq_desc[i].irq_data.chip->name);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
		seq_printf(p, "     Non-maskable interrupts\n");
	}
	return 0;
}

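/* Compute the interrupt target ID to program into an IMAP register
 * for the given cpu.  Starfire needs a firmware translation, JBUS
 * parts (Jalapeno/Serrano) use the JBUS TID field, other cheetah
 * parts split the cpuid into Safari agent and node IDs, and
 * everything else uses the plain UPA TID field.
 */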
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

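/* Pick the cpu an interrupt should be targeted at, honouring the
 * affinity mask but falling back to map_to_cpu()'s spread when the
 * mask covers all online cpus or intersects with them to nothing.
 */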
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpus_equal(mask, cpu_online_map)) {
		cpuid = map_to_cpu(virt_irq);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);
		cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(virt_irq, affinity)	\
	real_hard_smp_processor_id()
#endif

static void sun4u_irq_enable(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, data->affinity);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}
}

static int sun4u_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, mask);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}

	return 0;
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(struct irq_data *data)
{
}

static void sun4u_irq_eoi(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;
	struct irq_desc *desc = irq_desc + data->irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(handler_data))
		upa_writeq(ICLR_IDLE, handler_data->iclr);
}

static void sun4v_irq_enable(struct irq_data *data)
{
	unsigned int ino = virt_irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	unsigned int ino = virt_irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(struct irq_data *data)
{
	unsigned int ino = virt_irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(struct irq_data *data)
{
	unsigned int ino = virt_irq_table[data->irq].dev_ino;
	struct irq_desc *desc = irq_desc + data->irq;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(struct irq_data *data)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, data->affinity);

	dev_handle = virt_irq_table[data->irq].dev_handle;
	dev_ino = virt_irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(struct irq_data *data,
				   const struct cpumask *mask, bool force)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, mask);

	dev_handle = virt_irq_table[data->irq].dev_handle;
	dev_ino = virt_irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = virt_irq_table[data->irq].dev_handle;
	dev_ino = virt_irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(struct irq_data *data)
{
	struct irq_desc *desc = irq_desc + data->irq;
	unsigned long dev_handle, dev_ino;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	dev_handle = virt_irq_table[data->irq].dev_handle;
	dev_ino = virt_irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.name			= "sun4u",
	.irq_enable		= sun4u_irq_enable,
	.irq_disable		= sun4u_irq_disable,
	.irq_eoi		= sun4u_irq_eoi,
	.irq_set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
	.name			= "sun4v",
	.irq_enable		= sun4v_irq_enable,
	.irq_disable		= sun4v_irq_disable,
	.irq_eoi		= sun4v_irq_eoi,
	.irq_set_affinity	= sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
	.name			= "vsun4v",
	.irq_enable		= sun4v_virq_enable,
	.irq_disable		= sun4v_virq_disable,
	.irq_eoi		= sun4v_virq_eoi,
	.irq_set_affinity	= sun4v_virt_set_affinity,
};

static void pre_flow_handler(unsigned int virt_irq,
			     struct irq_desc *desc)
{
	struct irq_handler_data *handler_data = get_irq_data(virt_irq);
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);

	handle_fasteoi_irq(virt_irq, desc);
}

void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *handler_data = get_irq_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	handler_data->pre_handler = func;
	handler_data->arg1 = arg1;
	handler_data->arg2 = arg2;

	desc->handle_irq = pre_flow_handler;
}

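/* Allocate (or look up) the virtual IRQ for a sun4u interrupt source.
 * The INO is read from the IMAP register; the first time an INO is
 * seen, a virtual IRQ and its irq_handler_data are set up for it.
 */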
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int virt_irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, ino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	handler_data = get_irq_data(virt_irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_data(virt_irq, handler_data);

	handler_data->imap = imap;
	handler_data->iclr = iclr;

out:
	return virt_irq;
}

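/* Common helper for sun4v interrupts: map a sysino to its ino_bucket,
 * set up a virtual IRQ and handler data on first use, and poison the
 * IMAP/ICLR values since sun4v programs interrupts via hypervisor
 * calls rather than registers.
 */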
static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int virt_irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, sysino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	handler_data = get_irq_data(virt_irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_data(virt_irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

out:
	return virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

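/* Cookie-based sun4v virtual interrupts (used, for example, by LDC
 * channels).  The ino_bucket is allocated dynamically and the
 * complement of its physical address is registered with the
 * hypervisor as the delivery cookie.
 */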
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *handler_data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned int virt_irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* The only reference we store to the IRQ bucket is
	 * by physical address which kmemleak can't see, tell
	 * it that this object explicitly is not a leak and
	 * should be scanned.
	 */
	kmemleak_not_leak(bucket);

	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	virt_irq = virt_irq_alloc(devhandle, devino);
	bucket_set_virt_irq(__pa(bucket), virt_irq);

	set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
				      handle_fasteoi_irq,
				      "IVEC");

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	desc = irq_desc + virt_irq;
	desc->status |= IRQ_NOAUTOEN;

	set_irq_data(virt_irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return virt_irq;
}

void ack_bad_irq(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

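/* Top-level device interrupt handler.  The trap vector code chains
 * pending ino_buckets onto a per-cpu worklist by physical address;
 * grab the whole list atomically with PSTATE.IE cleared, then walk
 * it and dispatch each virtual IRQ on the hard IRQ stack.
 */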
void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << pil);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		struct irq_desc *desc;
		unsigned long next_pa;
		unsigned int virt_irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		virt_irq = bucket_get_virt_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		desc = irq_desc + virt_irq;

		if (!(desc->status & IRQ_DISABLED))
			desc->handle_irq(virt_irq, desc);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

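/* Run pending softirqs on the dedicated per-cpu softirq stack,
 * switching %sp around the call to __do_softirq().
 */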
void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}

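/* On CPU hot-unplug, re-run ->irq_set_affinity() for every interrupt
 * that has a handler and is not per-cpu, so the hardware target is
 * recomputed from the current affinity mask, then shut off the local
 * timer interrupt.
 */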
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			struct irq_data *data = irq_get_irq_data(irq);

			if (data->chip->irq_set_affinity)
				data->chip->irq_set_affinity(data,
							     data->affinity,
							     false);
		}
		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

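/* Per-cpu page holding the 64-byte cpu mondo data block immediately
 * followed by the cpu list (one u16 per cpu) used when sending
 * cross-cpu mondos.
 */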
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = kzalloc(size, GFP_KERNEL);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}