/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"

#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=&r" (ret)
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq_chain_pa)),
                               "i" (ASI_PHYS_USE_EC));

        return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1"
                             : /* no outputs */
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq_chain_pa)),
                               "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
        unsigned int ret;

        __asm__ __volatile__("lduwa [%1] %2, %0"
                             : "=&r" (ret)
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __virt_irq)),
                               "i" (ASI_PHYS_USE_EC));

        return ret;
}

static void bucket_set_virt_irq(unsigned long bucket_pa,
                                unsigned int virt_irq)
{
        __asm__ __volatile__("stwa %0, [%1] %2"
                             : /* no outputs */
                             : "r" (virt_irq),
                               "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __virt_irq)),
                               "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

static struct {
        unsigned int dev_handle;
        unsigned int dev_ino;
        unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);

unsigned char virt_irq_alloc(unsigned int dev_handle,
                             unsigned int dev_ino)
{
        unsigned long flags;
        unsigned char ent;

        BUILD_BUG_ON(NR_IRQS >= 256);

        spin_lock_irqsave(&virt_irq_alloc_lock, flags);

        for (ent = 1; ent < NR_IRQS; ent++) {
                if (!virt_irq_table[ent].in_use)
                        break;
        }
        if (ent >= NR_IRQS) {
                printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
                ent = 0;
        } else {
                virt_irq_table[ent].dev_handle = dev_handle;
                virt_irq_table[ent].dev_ino = dev_ino;
                virt_irq_table[ent].in_use = 1;
        }

        spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

        return ent;
}

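/* A minimal caller sketch (cf. build_irq() and sun4v_build_virq()
 * below).  Zero is the failure value, since entry 0 of the table is
 * deliberately never handed out:
 *
 *	virt_irq = virt_irq_alloc(0, ino);
 *	if (!virt_irq)
 *		return 0;	(all usable entries are in use)
 *
 * The BUILD_BUG_ON() above exists because the return type is
 * unsigned char, which caps usable virtual IRQ numbers at 255.
 */
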
#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
        unsigned long flags;

        if (virt_irq >= NR_IRQS)
                return;

        spin_lock_irqsave(&virt_irq_alloc_lock, flags);

        virt_irq_table[virt_irq].in_use = 0;

        spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d       ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
                seq_printf(p, " %9s", irq_desc[i].chip->typename);
                seq_printf(p, "  %s", action->name);

                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
                seq_printf(p, "NMI: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
                seq_printf(p, "     Non-maskable interrupts\n");
        }
        return 0;
}

static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
        unsigned int tid;

        if (this_is_starfire) {
                tid = starfire_translate(imap, cpuid);
                tid <<= IMAP_TID_SHIFT;
                tid &= IMAP_TID_UPA;
        } else {
                if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                        unsigned long ver;

                        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                        if ((ver >> 32UL) == __JALAPENO_ID ||
                            (ver >> 32UL) == __SERRANO_ID) {
                                tid = cpuid << IMAP_TID_SHIFT;
                                tid &= IMAP_TID_JBUS;
                        } else {
                                unsigned int a = cpuid & 0x1f;
                                unsigned int n = (cpuid >> 5) & 0x1f;

                                tid = ((a << IMAP_AID_SHIFT) |
                                       (n << IMAP_NID_SHIFT));
                                tid &= (IMAP_AID_SAFARI |
                                        IMAP_NID_SAFARI);
                        }
                } else {
                        tid = cpuid << IMAP_TID_SHIFT;
                        tid &= IMAP_TID_UPA;
                }
        }

        return tid;
}

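/* Worked example for the Safari (non-JBUS Cheetah) case above: cpuid
 * 37 (0b100101) yields agent a = 37 & 0x1f = 5 and node
 * n = (37 >> 5) & 0x1f = 1, which get packed into the IMAP AID/NID
 * fields.  The UPA and JBUS variants instead place the whole cpuid
 * at IMAP_TID_SHIFT.
 */
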
struct irq_handler_data {
        unsigned long	iclr;
        unsigned long	imap;

        void		(*pre_handler)(unsigned int, void *, void *);
        void		*arg1;
        void		*arg2;
};

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
        cpumask_t mask;
        int cpuid;

        cpumask_copy(&mask, irq_desc[virt_irq].affinity);
        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
        do_round_robin:
                spin_lock_irqsave(&irq_rover_lock, flags);

                while (!cpu_online(irq_rover)) {
                        if (++irq_rover >= nr_cpu_ids)
                                irq_rover = 0;
                }
                cpuid = irq_rover;
                do {
                        if (++irq_rover >= nr_cpu_ids)
                                irq_rover = 0;
                } while (!cpu_online(irq_rover));

                spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpumask_t tmp;

                cpus_and(tmp, cpu_online_map, mask);

                if (cpus_empty(tmp))
                        goto do_round_robin;

                cpuid = first_cpu(tmp);
        }

        return cpuid;
}
#else
static int irq_choose_cpu(unsigned int virt_irq)
{
        return real_hard_smp_processor_id();
}
#endif

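/* Net effect of irq_choose_cpu(): an IRQ whose affinity is still the
 * default CPU_MASK_ALL is spread round-robin over the online cpus via
 * irq_rover (one step per ->enable()/->set_affinity() call), while an
 * explicitly pinned IRQ goes to the first online cpu of its mask,
 * falling back to round-robin if the mask contains no online cpu.
 */
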
static void sun4u_irq_enable(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data)) {
                unsigned long cpuid, imap, val;
                unsigned int tid;

                cpuid = irq_choose_cpu(virt_irq);
                imap = data->imap;

                tid = sun4u_compute_tid(imap, cpuid);

                val = upa_readq(imap);
                val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
                         IMAP_AID_SAFARI | IMAP_NID_SAFARI);
                val |= tid | IMAP_VALID;
                upa_writeq(val, imap);
                upa_writeq(ICLR_IDLE, data->iclr);
        }
}

static void sun4u_set_affinity(unsigned int virt_irq,
                               const struct cpumask *mask)
{
        sun4u_irq_enable(virt_irq);
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(unsigned int virt_irq)
{
}

static void sun4u_irq_eoi(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        struct irq_desc *desc = irq_desc + virt_irq;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        if (likely(data))
                upa_writeq(ICLR_IDLE, data->iclr);
}

static void sun4v_irq_enable(unsigned int virt_irq)
{
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
        unsigned long cpuid = irq_choose_cpu(virt_irq);
        int err;

        err = sun4v_intr_settarget(ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                       "err(%d)\n", ino, cpuid, err);
        err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setstate(%x): "
                       "err(%d)\n", ino, err);
        err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
                       ino, err);
}

static void sun4v_set_affinity(unsigned int virt_irq,
                               const struct cpumask *mask)
{
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
        unsigned long cpuid = irq_choose_cpu(virt_irq);
        int err;

        err = sun4v_intr_settarget(ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                       "err(%d)\n", ino, cpuid, err);
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
        int err;

        err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setenabled(%x): "
                       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(unsigned int virt_irq)
{
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
        struct irq_desc *desc = irq_desc + virt_irq;
        int err;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setstate(%x): "
                       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(unsigned int virt_irq)
{
        unsigned long cpuid, dev_handle, dev_ino;
        int err;

        cpuid = irq_choose_cpu(virt_irq);

        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;

        err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                       "err(%d)\n",
                       dev_handle, dev_ino, cpuid, err);
        err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                    HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                       "HV_INTR_STATE_IDLE): err(%d)\n",
                       dev_handle, dev_ino, err);
        err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                    HV_INTR_ENABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
                       "HV_INTR_ENABLED): err(%d)\n",
                       dev_handle, dev_ino, err);
}

static void sun4v_virt_set_affinity(unsigned int virt_irq,
                                    const struct cpumask *mask)
{
        unsigned long cpuid, dev_handle, dev_ino;
        int err;

        cpuid = irq_choose_cpu(virt_irq);

        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;

        err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                       "err(%d)\n",
                       dev_handle, dev_ino, cpuid, err);
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
        unsigned long dev_handle, dev_ino;
        int err;

        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;

        err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                    HV_INTR_DISABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
                       "HV_INTR_DISABLED): err(%d)\n",
                       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(unsigned int virt_irq)
{
        struct irq_desc *desc = irq_desc + virt_irq;
        unsigned long dev_handle, dev_ino;
        int err;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;

        err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                    HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                       "HV_INTR_STATE_IDLE): err(%d)\n",
                       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
        .typename	= "sun4u",
        .enable		= sun4u_irq_enable,
        .disable	= sun4u_irq_disable,
        .eoi		= sun4u_irq_eoi,
        .set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
        .typename	= "sun4v",
        .enable		= sun4v_irq_enable,
        .disable	= sun4v_irq_disable,
        .eoi		= sun4v_irq_eoi,
        .set_affinity	= sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
        .typename	= "vsun4v",
        .enable		= sun4v_virq_enable,
        .disable	= sun4v_virq_disable,
        .eoi		= sun4v_virq_eoi,
        .set_affinity	= sun4v_virt_set_affinity,
};

static void pre_flow_handler(unsigned int virt_irq,
                             struct irq_desc *desc)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;

        data->pre_handler(ino, data->arg1, data->arg2);

        handle_fasteoi_irq(virt_irq, desc);
}

void irq_install_pre_handler(int virt_irq,
                             void (*func)(unsigned int, void *, void *),
                             void *arg1, void *arg2)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        struct irq_desc *desc = irq_desc + virt_irq;

        data->pre_handler = func;
        data->arg1 = arg1;
        data->arg2 = arg2;

        desc->handle_irq = pre_flow_handler;
}

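/* Hypothetical usage sketch for the pre-handler hook above; after
 * this call, my_prep() runs from pre_flow_handler() ahead of the
 * normal fasteoi flow on every delivery of virt_irq:
 *
 *	irq_install_pre_handler(virt_irq, my_prep, arg1, arg2);
 *
 * (my_prep is an illustrative name, not a function in this file.)
 */
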
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        unsigned int virt_irq;
        int ino;

        BUG_ON(tlb_type == hypervisor);

        ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
        bucket = &ivector_table[ino];
        virt_irq = bucket_get_virt_irq(__pa(bucket));
        if (!virt_irq) {
                virt_irq = virt_irq_alloc(0, ino);
                bucket_set_virt_irq(__pa(bucket), virt_irq);
                set_irq_chip_and_handler_name(virt_irq,
                                              &sun4u_irq,
                                              handle_fasteoi_irq,
                                              "IVEC");
        }

        data = get_irq_chip_data(virt_irq);
        if (unlikely(data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        set_irq_chip_data(virt_irq, data);

        data->imap = imap;
        data->iclr = iclr;

out:
        return virt_irq;
}

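/* Illustrative build_irq() caller, assuming a sun4u bus driver that
 * has already located a slot's interrupt map/clear registers (e.g.
 * from firmware properties):
 *
 *	unsigned int virt_irq = build_irq(inofixup, iclr, imap);
 *
 * iclr/imap are the register addresses, and inofixup adjusts the INO
 * read back from the IMAP register for devices that share one IMAP.
 */
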
static unsigned int sun4v_build_common(unsigned long sysino,
                                       struct irq_chip *chip)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        unsigned int virt_irq;

        BUG_ON(tlb_type != hypervisor);

        bucket = &ivector_table[sysino];
        virt_irq = bucket_get_virt_irq(__pa(bucket));
        if (!virt_irq) {
                virt_irq = virt_irq_alloc(0, sysino);
                bucket_set_virt_irq(__pa(bucket), virt_irq);
                set_irq_chip_and_handler_name(virt_irq, chip,
                                              handle_fasteoi_irq,
                                              "IVEC");
        }

        data = get_irq_chip_data(virt_irq);
        if (unlikely(data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        set_irq_chip_data(virt_irq, data);

        /* Catch accidental accesses to these things.  IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        data->imap = ~0UL;
        data->iclr = ~0UL;

out:
        return virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
        unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

        return sun4v_build_common(sysino, &sun4v_irq);
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
        struct irq_handler_data *data;
        unsigned long hv_err, cookie;
        struct ino_bucket *bucket;
        struct irq_desc *desc;
        unsigned int virt_irq;

        bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
        if (unlikely(!bucket))
                return 0;
        __flush_dcache_range((unsigned long) bucket,
                             ((unsigned long) bucket +
                              sizeof(struct ino_bucket)));

        virt_irq = virt_irq_alloc(devhandle, devino);
        bucket_set_virt_irq(__pa(bucket), virt_irq);

        set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
                                      handle_fasteoi_irq,
                                      "IVEC");

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data))
                return 0;

        /* In order to make the LDC channel startup sequence easier,
         * especially wrt. locking, we do not let request_irq() enable
         * the interrupt.
         */
        desc = irq_desc + virt_irq;
        desc->status |= IRQ_NOAUTOEN;

        set_irq_chip_data(virt_irq, data);

        /* Catch accidental accesses to these things.  IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        data->imap = ~0UL;
        data->iclr = ~0UL;

        cookie = ~__pa(bucket);
        hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
        if (hv_err) {
                prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
                            "err=%lu\n", devhandle, devino, hv_err);
                prom_halt();
        }

        return virt_irq;
}

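/* A note on the cookie above: ~__pa(bucket) is a negative value, and
 * (as the device-mondo vector code is written) that sign bit is what
 * lets the trap path tell a cookie-carrying virtual interrupt apart
 * from a plain sysino when the hypervisor delivers it.
 */
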
void ack_bad_irq(unsigned int virt_irq)
{
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;

        if (!ino)
                ino = 0xdeadbeef;

        printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
               ino, virt_irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

static __attribute__((always_inline)) void *set_hardirq_stack(void)
{
        void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

        __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
        if (orig_sp < sp ||
            orig_sp > (sp + THREAD_SIZE)) {
                sp += THREAD_SIZE - 192 - STACK_BIAS;
                __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
        }

        return orig_sp;
}

static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
{
        __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
}

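/* The arithmetic used by set_hardirq_stack() above and do_softirq()
 * below: each IRQ stack is THREAD_SIZE bytes, sparc64 stack pointers
 * carry a STACK_BIAS (2047) offset, and 192 bytes are kept free at
 * the top so there is always room to spill a register window, hence
 *
 *	sp = base + THREAD_SIZE - 192 - STACK_BIAS;
 */
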
void handler_irq(int irq, struct pt_regs *regs)
{
        unsigned long pstate, bucket_pa;
        struct pt_regs *old_regs;
        void *orig_sp;

        clear_softint(1 << irq);

        old_regs = set_irq_regs(regs);
        irq_enter();

        /* Grab an atomic snapshot of the pending IVECs.  */
        __asm__ __volatile__("rdpr %%pstate, %0\n\t"
                             "wrpr %0, %3, %%pstate\n\t"
                             "ldx  [%2], %1\n\t"
                             "stx  %%g0, [%2]\n\t"
                             "wrpr %0, 0x0, %%pstate\n\t"
                             : "=&r" (pstate), "=&r" (bucket_pa)
                             : "r" (irq_work_pa(smp_processor_id())),
                               "i" (PSTATE_IE)
                             : "memory");

        orig_sp = set_hardirq_stack();

        while (bucket_pa) {
                struct irq_desc *desc;
                unsigned long next_pa;
                unsigned int virt_irq;

                next_pa = bucket_get_chain_pa(bucket_pa);
                virt_irq = bucket_get_virt_irq(bucket_pa);
                bucket_clear_chain_pa(bucket_pa);

                desc = irq_desc + virt_irq;

                if (!(desc->status & IRQ_DISABLED))
                        desc->handle_irq(virt_irq, desc);

                bucket_pa = next_pa;
        }

        restore_hardirq_stack(orig_sp);

        irq_exit();
        set_irq_regs(old_regs);
}

void do_softirq(void)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                void *orig_sp, *sp = softirq_stack[smp_processor_id()];

                sp += THREAD_SIZE - 192 - STACK_BIAS;

                __asm__ __volatile__("mov %%sp, %0\n\t"
                                     "mov %1, %%sp"
                                     : "=&r" (orig_sp)
                                     : "r" (sp));
                __do_softirq();
                __asm__ __volatile__("mov %0, %%sp"
                                     : : "r" (orig_sp));
        }

        local_irq_restore(flags);
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
        unsigned int irq;

        for (irq = 0; irq < NR_IRQS; irq++) {
                unsigned long flags;

                spin_lock_irqsave(&irq_desc[irq].lock, flags);
                if (irq_desc[irq].action &&
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
                                        irq_desc[irq].affinity);
                }
                spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }

        tick_ops->disable_irq();
}
#endif

struct sun5_timer {
        u64	count0;
        u64	limit0;
        u64	count1;
        u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
        struct device_node *dp;
        const unsigned int *addr;

        /* PROM timer node hangs out in the top level of device siblings... */
        dp = of_find_node_by_path("/");
        dp = dp->child;
        while (dp) {
                if (!strcmp(dp->name, "counter-timer"))
                        break;
                dp = dp->sibling;
        }

        /* Assume that if the node is not present, the PROM uses a
         * different tick mechanism which we need not care about.
         */
        if (!dp) {
                prom_timers = (struct sun5_timer *) 0;
                return;
        }

        /* If the PROM is really using this, it must have mapped the timer. */
        addr = of_get_property(dp, "address", NULL);
        if (!addr) {
                prom_printf("PROM does not have timer mapped, trying to continue.\n");
                prom_timers = (struct sun5_timer *) 0;
                return;
        }
        prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
        if (!prom_timers)
                return;

        /* Save them away for later. */
        prom_limit0 = prom_timers->limit0;
        prom_limit1 = prom_timers->limit1;

        /* Just as on sun4c/sun4m, the PROM uses a timer which ticks at
         * IRQ 14.  We turn both off here just to be paranoid.
         */
        prom_timers->limit0 = 0;
        prom_timers->limit1 = 0;

        /* Wheee, eat the interrupt packet too... */
        __asm__ __volatile__(
"       mov     0x40, %%g2\n"
"       ldxa    [%%g0] %0, %%g1\n"
"       ldxa    [%%g2] %1, %%g1\n"
"       stxa    %%g0, [%%g0] %0\n"
"       membar  #Sync\n"
        : /* no outputs */
        : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
        : "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
        int cpu = hard_smp_processor_id();

        trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
        unsigned long num_entries = (qmask + 1) / 64;
        unsigned long status;

        status = sun4v_cpu_qconf(type, paddr, num_entries);
        if (status != HV_EOK) {
                prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
                            "err %lu\n", type, paddr, num_entries, status);
                prom_halt();
        }
}

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
        struct trap_per_cpu *tb = &trap_block[this_cpu];

        register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
                           tb->cpu_mondo_qmask);
        register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
                           tb->dev_mondo_qmask);
        register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
                           tb->resum_qmask);
        register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
                           tb->nonresum_qmask);
}

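/* Queue-size arithmetic, for clarity: sun4v mondo queue entries are
 * 64 bytes each, and a queue's qmask is its byte size minus one, so
 * register_one_mondo() above computes num_entries = (qmask + 1) / 64;
 * e.g. a 16KB queue registers as 16384 / 64 = 256 entries.
 */
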
static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
        unsigned long size = PAGE_ALIGN(qmask + 1);
        void *p = __alloc_bootmem(size, size, 0);

        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
                prom_halt();
        }

        *pa_ptr = __pa(p);
}

static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
        unsigned long size = PAGE_ALIGN(qmask + 1);
        void *p = __alloc_bootmem(size, size, 0);

        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
                prom_halt();
        }

        *pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
        void *page;

        BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

        page = alloc_bootmem_pages(PAGE_SIZE);
        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
                prom_halt();
        }

        tb->cpu_mondo_block_pa = __pa(page);
        tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct trap_per_cpu *tb = &trap_block[cpu];

                alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
                alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
                alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
                alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
                alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
                alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
                               tb->nonresum_qmask);
        }
}

static void __init init_send_mondo_info(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct trap_per_cpu *tb = &trap_block[cpu];

                init_cpu_send_mondo_info(tb);
        }
}

static struct irqaction timer_irq_action = {
        .name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
        unsigned long size;

        map_prom_timers();
        kill_prom_timer();

        size = sizeof(struct ino_bucket) * NUM_IVECS;
        ivector_table = alloc_bootmem(size);
        if (!ivector_table) {
                prom_printf("Fatal error, cannot allocate ivector_table\n");
                prom_halt();
        }
        __flush_dcache_range((unsigned long) ivector_table,
                             ((unsigned long) ivector_table) + size);

        ivector_table_pa = __pa(ivector_table);

        if (tlb_type == hypervisor)
                sun4v_init_mondo_queues();

        init_send_mondo_info();

        if (tlb_type == hypervisor) {
                /* Load up the boot cpu's entries. */
                sun4v_register_mondo_queues(hard_smp_processor_id());
        }

        /* We need to clear any IRQs pending in the soft interrupt
         * registers, since a spurious one could be left around from the
         * PROM timer which we just disabled.
         */
        clear_softint(get_softint());

        /* Now that the ivector table is initialized, it is safe
         * to receive IRQ vector traps.  We will normally take
         * one or two right now, in case some device PROM used
         * to boot us wants to speak to us.  We just ignore them.
         */
        __asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
                             "or   %%g1, %0, %%g1\n\t"
                             "wrpr %%g1, 0x0, %%pstate"
                             : /* No outputs */
                             : "i" (PSTATE_IE)
                             : "g1");

        irq_desc[0].action = &timer_irq_action;
}