/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)
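
/* A note on how the pieces above fit together: each cpu keeps a list
 * of pending ino_buckets, linked by physical address through
 * __irq_chain_pa and headed by the irq_worklist_pa slot in that cpu's
 * trap_block[] entry.  The low-level vectored-interrupt trap code
 * (in assembly, outside this file) pushes buckets onto that list, and
 * handler_irq() below atomically snapshots and walks it using the
 * bypass accessors above.
 */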

static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} irq_table[NR_IRQS];
static DEFINE_SPINLOCK(irq_alloc_lock);

unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		irq_table[ent].dev_handle = dev_handle;
		irq_table[ent].dev_ino = dev_ino;
		irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&irq_alloc_lock, flags);

	return ent;
}
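
/* Typical usage, as seen in the builders further down: sun4u and
 * sun4v sysino interrupts call irq_alloc(0, ino), while cookie-based
 * virtual interrupts pass the real (dev_handle, dev_ino) pair so the
 * chip methods can recover it from irq_table[].  Slot 0 is never
 * handed out (the scan starts at ent = 1), which lets a return value
 * of 0 double as the allocation-failure indication.
 */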

#ifdef CONFIG_PCI_MSI
void irq_free(unsigned int irq)
{
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&irq_alloc_lock, flags);

	irq_table[irq].in_use = 0;

	spin_unlock_irqrestore(&irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "NMI: ");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
	seq_printf(p, "     Non-maskable interrupts\n");
	return 0;
}

static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}
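
/* A worked example of the Safari decomposition above (illustrative
 * numbers, not from the source): for cpuid 37, a = 37 & 0x1f = 5 and
 * n = (37 >> 5) & 0x1f = 1, so the target ID encodes agent 5 on
 * node 1 via the IMAP_AID_SHIFT and IMAP_NID_SHIFT fields.
 * Jalapeno/Serrano (JBUS) and classic UPA parts instead take the
 * cpuid directly, shifted into a single TID field.
 */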

struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpumask_equal(&mask, cpu_online_mask)) {
		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;

		cpumask_and(&tmp, cpu_online_mask, &mask);
		cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(irq, affinity)	\
	real_hard_smp_processor_id()
#endif

static void sun4u_irq_enable(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, data->affinity);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}
}

static int sun4u_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, mask);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}

	return 0;
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(struct irq_data *data)
{
}

static void sun4u_irq_eoi(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data))
		upa_writeq(ICLR_IDLE, handler_data->iclr);
}

static void sun4v_irq_enable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(struct irq_data *data)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, data->affinity);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
399 printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
400 "HV_INTR_ENABLED): err(%d)\n",
401 dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(struct irq_data *data,
				   const struct cpumask *mask, bool force)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, mask);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}
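
/* Note the split above: the sun4v_irq_* methods operate on a flat
 * sysino and use the sun4v_intr_*() hypervisor calls, while the
 * sun4v_virq_* methods look up the (dev_handle, dev_ino) pair
 * recorded in irq_table[] and use the sun4v_vintr_*() calls.  The
 * latter is what the cookie-based "virtual" interrupts built by
 * sun4v_build_virq() below require.
 */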

static struct irq_chip sun4u_irq = {
	.name			= "sun4u",
	.irq_enable		= sun4u_irq_enable,
	.irq_disable		= sun4u_irq_disable,
	.irq_eoi		= sun4u_irq_eoi,
	.irq_set_affinity	= sun4u_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_irq = {
	.name			= "sun4v",
	.irq_enable		= sun4v_irq_enable,
	.irq_disable		= sun4v_irq_disable,
	.irq_eoi		= sun4v_irq_eoi,
	.irq_set_affinity	= sun4v_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_virq = {
	.name			= "vsun4v",
	.irq_enable		= sun4v_virq_enable,
	.irq_disable		= sun4v_virq_disable,
	.irq_eoi		= sun4v_virq_eoi,
	.irq_set_affinity	= sun4v_virt_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static void pre_flow_handler(struct irq_data *d)
{
	struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
	unsigned int ino = irq_table[d->irq].dev_ino;

	handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
}

void irq_install_pre_handler(int irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *handler_data = irq_get_handler_data(irq);

	handler_data->pre_handler = func;
	handler_data->arg1 = arg1;
	handler_data->arg2 = arg2;

	__irq_set_preflow_handler(irq, pre_flow_handler);
}

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, ino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, &sun4u_irq,
					      handle_fasteoi_irq, "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	handler_data->imap  = imap;
	handler_data->iclr  = iclr;

out:
	return irq;
}
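
/* A hypothetical sun4u caller (names are illustrative, not from this
 * file): a bus controller driver that has computed the physical
 * addresses of a device's interrupt map and clear registers would do
 * something like
 *
 *	unsigned int irq = build_irq(0, iclr_pa, imap_pa);
 *	err = request_irq(irq, my_device_handler, 0, "mydev", dev);
 *
 * build_irq() reads the INO out of the IMAP register itself, so the
 * caller supplies only a fixup offset plus the two register addresses.
 */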

static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, sysino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
					      "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

out:
	return irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *handler_data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	unsigned int irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* The only reference we store to the IRQ bucket is
	 * by physical address which kmemleak can't see, tell
	 * it that this object explicitly is not a leak and
	 * should be scanned.
	 */
	kmemleak_not_leak(bucket);

	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	irq = irq_alloc(devhandle, devino);
	bucket_set_irq(__pa(bucket), irq);

	irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
				      "IVEC");

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);
	irq_set_handler_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return irq;
}
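
/* On the cookie choice above: the value handed to the hypervisor is
 * ~__pa(bucket).  Real physical addresses never have all of their
 * top bits set, so the complemented value cannot be confused with a
 * small sysino, and the dev-mondo trap path can recover the bucket
 * address with a single bitwise NOT.  (This reading is inferred from
 * the code above; the trap side lives in the assembly entry code,
 * not in this file.)
 */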

void ack_bad_irq(unsigned int irq)
{
	unsigned int ino = irq_table[irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
	       ino, irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << pil);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		unsigned long next_pa;
		unsigned int irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		irq = bucket_get_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		generic_handle_irq(irq);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}
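
/* On the stack switch above: softirq_stack[cpu] points at the base
 * of a THREAD_SIZE region, so base + THREAD_SIZE - 192 - STACK_BIAS
 * yields a valid initial %sp.  The 192 bytes leave room for the
 * minimal sparc64 stack frame (register window save area plus ABI
 * scratch), and subtracting STACK_BIAS accounts for the V9
 * convention that %sp is biased by 2047 from the true stack address.
 */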

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		unsigned long flags;

		raw_spin_lock_irqsave(&desc->lock, flags);
		if (desc->action && !irqd_is_per_cpu(data)) {
			if (data->chip->irq_set_affinity)
				data->chip->irq_set_affinity(data,
							     data->affinity,
							     false);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = kzalloc(size, GFP_KERNEL);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries. */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_to_desc(0)->action = &timer_irq_action;
}