/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

#define NUM_IVECS       (IMAP_INR + 1)

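/* Table of ino_buckets, one per hardware interrupt vector (INO on
 * sun4u, sysino on sun4v).  The physical address is kept as well,
 * since the low-level vector code and the accessors below reference
 * buckets by physical address.
 */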
struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=&r" (ret)
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq_chain_pa)),
                               "i" (ASI_PHYS_USE_EC));

        return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1"
                             : /* no outputs */
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq_chain_pa)),
                               "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
        unsigned int ret;

        __asm__ __volatile__("lduwa [%1] %2, %0"
                             : "=&r" (ret)
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq)),
                               "i" (ASI_PHYS_USE_EC));

        return ret;
}

static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
        __asm__ __volatile__("stwa %0, [%1] %2"
                             : /* no outputs */
                             : "r" (irq),
                               "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq)),
                               "i" (ASI_PHYS_USE_EC));
}

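/* Head (a physical address) of the per-cpu list of pending
 * ino_buckets, which handler_irq() below snapshots and drains.
 */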
#define irq_work_pa(__cpu)      &(trap_block[(__cpu)].irq_worklist_pa)

static struct {
        unsigned int dev_handle;
        unsigned int dev_ino;
        unsigned int in_use;
} irq_table[NR_IRQS];
static DEFINE_SPINLOCK(irq_alloc_lock);

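/* Allocate a virtual irq number.  Slot 0 is reserved to mean "no
 * irq", which is why the scan starts at 1 and why failure returns 0.
 * Virtual irq numbers must fit in an unsigned char, hence the
 * BUILD_BUG_ON.
 */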
unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
        unsigned long flags;
        unsigned char ent;

        BUILD_BUG_ON(NR_IRQS >= 256);

        spin_lock_irqsave(&irq_alloc_lock, flags);

        for (ent = 1; ent < NR_IRQS; ent++) {
                if (!irq_table[ent].in_use)
                        break;
        }
        if (ent >= NR_IRQS) {
                printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
                ent = 0;
        } else {
                irq_table[ent].dev_handle = dev_handle;
                irq_table[ent].dev_ino = dev_ino;
                irq_table[ent].in_use = 1;
        }

        spin_unlock_irqrestore(&irq_alloc_lock, flags);

        return ent;
}

#ifdef CONFIG_PCI_MSI
void irq_free(unsigned int irq)
{
        unsigned long flags;

        if (irq >= NR_IRQS)
                return;

        spin_lock_irqsave(&irq_alloc_lock, flags);

        irq_table[irq].in_use = 0;

        spin_unlock_irqrestore(&irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

        seq_printf(p, "NMI: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
        seq_printf(p, "     Non-maskable interrupts\n");
        return 0;
}

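/* Compute the target ID (TID) field to program into an IMAP register
 * so the interrupt is delivered to @cpuid.  The encoding differs
 * between Starfire, JBUS (Jalapeno/Serrano), Safari (Cheetah) and
 * plain UPA systems.
 */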
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
        unsigned int tid;

        if (this_is_starfire) {
                tid = starfire_translate(imap, cpuid);
                tid <<= IMAP_TID_SHIFT;
                tid &= IMAP_TID_UPA;
        } else {
                if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                        unsigned long ver;

                        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                        if ((ver >> 32UL) == __JALAPENO_ID ||
                            (ver >> 32UL) == __SERRANO_ID) {
                                tid = cpuid << IMAP_TID_SHIFT;
                                tid &= IMAP_TID_JBUS;
                        } else {
                                unsigned int a = cpuid & 0x1f;
                                unsigned int n = (cpuid >> 5) & 0x1f;

                                tid = ((a << IMAP_AID_SHIFT) |
                                       (n << IMAP_NID_SHIFT));
                                tid &= (IMAP_AID_SAFARI |
                                        IMAP_NID_SAFARI);
                        }
                } else {
                        tid = cpuid << IMAP_TID_SHIFT;
                        tid &= IMAP_TID_UPA;
                }
        }

        return tid;
}

struct irq_handler_data {
        unsigned long   iclr;
        unsigned long   imap;

        void            (*pre_handler)(unsigned int, void *, void *);
        void            *arg1;
        void            *arg2;
};

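/* Pick a target cpu for an interrupt from its affinity mask.  When
 * the mask covers every online cpu we defer to map_to_cpu() from the
 * cpumap code; an explicit mask selects the first online cpu it
 * contains.
 */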
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
        cpumask_t mask;
        int cpuid;

        cpumask_copy(&mask, affinity);
        if (cpumask_equal(&mask, cpu_online_mask)) {
                cpuid = map_to_cpu(irq);
        } else {
                cpumask_t tmp;

                cpumask_and(&tmp, cpu_online_mask, &mask);
                cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
        }

        return cpuid;
}
#else
#define irq_choose_cpu(irq, affinity) \
        real_hard_smp_processor_id()
#endif

static void sun4u_irq_enable(struct irq_data *data)
{
        struct irq_handler_data *handler_data = data->handler_data;

        if (likely(handler_data)) {
                unsigned long cpuid, imap, val;
                unsigned int tid;

                cpuid = irq_choose_cpu(data->irq, data->affinity);
                imap = handler_data->imap;

                tid = sun4u_compute_tid(imap, cpuid);

                val = upa_readq(imap);
                val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
                         IMAP_AID_SAFARI | IMAP_NID_SAFARI);
                val |= tid | IMAP_VALID;
                upa_writeq(val, imap);
                upa_writeq(ICLR_IDLE, handler_data->iclr);
        }
}

static int sun4u_set_affinity(struct irq_data *data,
                              const struct cpumask *mask, bool force)
{
        struct irq_handler_data *handler_data = data->handler_data;

        if (likely(handler_data)) {
                unsigned long cpuid, imap, val;
                unsigned int tid;

                cpuid = irq_choose_cpu(data->irq, mask);
                imap = handler_data->imap;

                tid = sun4u_compute_tid(imap, cpuid);

                val = upa_readq(imap);
                val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
                         IMAP_AID_SAFARI | IMAP_NID_SAFARI);
                val |= tid | IMAP_VALID;
                upa_writeq(val, imap);
                upa_writeq(ICLR_IDLE, handler_data->iclr);
        }

        return 0;
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(struct irq_data *data)
{
}

static void sun4u_irq_eoi(struct irq_data *data)
{
        struct irq_handler_data *handler_data = data->handler_data;

        if (likely(handler_data))
                upa_writeq(ICLR_IDLE, handler_data->iclr);
}

static void sun4v_irq_enable(struct irq_data *data)
{
        unsigned int ino = irq_table[data->irq].dev_ino;
        unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
        int err;

        err = sun4v_intr_settarget(ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                       "err(%d)\n", ino, cpuid, err);
        err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setstate(%x): "
                       "err(%d)\n", ino, err);
        err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
                       ino, err);
}

static int sun4v_set_affinity(struct irq_data *data,
                              const struct cpumask *mask, bool force)
{
        unsigned int ino = irq_table[data->irq].dev_ino;
        unsigned long cpuid = irq_choose_cpu(data->irq, mask);
        int err;

        err = sun4v_intr_settarget(ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                       "err(%d)\n", ino, cpuid, err);

        return 0;
}

static void sun4v_irq_disable(struct irq_data *data)
{
        unsigned int ino = irq_table[data->irq].dev_ino;
        int err;

        err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setenabled(%x): "
                       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(struct irq_data *data)
{
        unsigned int ino = irq_table[data->irq].dev_ino;
        int err;

        err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setstate(%x): "
                       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(struct irq_data *data)
{
        unsigned long cpuid, dev_handle, dev_ino;
        int err;

        cpuid = irq_choose_cpu(data->irq, data->affinity);

        dev_handle = irq_table[data->irq].dev_handle;
        dev_ino = irq_table[data->irq].dev_ino;

        err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                       "err(%d)\n",
                       dev_handle, dev_ino, cpuid, err);
        err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                    HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                       "HV_INTR_STATE_IDLE): err(%d)\n",
                       dev_handle, dev_ino, err);
        err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                    HV_INTR_ENABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
                       "HV_INTR_ENABLED): err(%d)\n",
                       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(struct irq_data *data,
                                   const struct cpumask *mask, bool force)
{
        unsigned long cpuid, dev_handle, dev_ino;
        int err;

        cpuid = irq_choose_cpu(data->irq, mask);

        dev_handle = irq_table[data->irq].dev_handle;
        dev_ino = irq_table[data->irq].dev_ino;

        err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                       "err(%d)\n",
                       dev_handle, dev_ino, cpuid, err);

        return 0;
}

static void sun4v_virq_disable(struct irq_data *data)
{
        unsigned long dev_handle, dev_ino;
        int err;

        dev_handle = irq_table[data->irq].dev_handle;
        dev_ino = irq_table[data->irq].dev_ino;

        err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                    HV_INTR_DISABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
                       "HV_INTR_DISABLED): err(%d)\n",
                       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(struct irq_data *data)
{
        unsigned long dev_handle, dev_ino;
        int err;

        dev_handle = irq_table[data->irq].dev_handle;
        dev_ino = irq_table[data->irq].dev_ino;

        err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                    HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                       "HV_INTR_STATE_IDLE): err(%d)\n",
                       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
        .name                   = "sun4u",
        .irq_enable             = sun4u_irq_enable,
        .irq_disable            = sun4u_irq_disable,
        .irq_eoi                = sun4u_irq_eoi,
        .irq_set_affinity       = sun4u_set_affinity,
        .flags                  = IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_irq = {
        .name                   = "sun4v",
        .irq_enable             = sun4v_irq_enable,
        .irq_disable            = sun4v_irq_disable,
        .irq_eoi                = sun4v_irq_eoi,
        .irq_set_affinity       = sun4v_set_affinity,
        .flags                  = IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_virq = {
        .name                   = "vsun4v",
        .irq_enable             = sun4v_virq_enable,
        .irq_disable            = sun4v_virq_disable,
        .irq_eoi                = sun4v_virq_eoi,
        .irq_set_affinity       = sun4v_virt_set_affinity,
        .flags                  = IRQCHIP_EOI_IF_HANDLED,
};

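/* Bridge between the generic fasteoi flow and the optional
 * device-specific pre-handler installed below: the flow handler
 * invokes this before the irq actions run.
 */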
static void pre_flow_handler(struct irq_data *d)
{
        struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
        unsigned int ino = irq_table[d->irq].dev_ino;

        handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
}

void irq_install_pre_handler(int irq,
                             void (*func)(unsigned int, void *, void *),
                             void *arg1, void *arg2)
{
        struct irq_handler_data *handler_data = irq_get_handler_data(irq);

        handler_data->pre_handler = func;
        handler_data->arg1 = arg1;
        handler_data->arg2 = arg2;

        __irq_set_preflow_handler(irq, pre_flow_handler);
}

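/* Map a sun4u (IMAP, ICLR) register pair to a virtual irq,
 * allocating the irq number and handler data the first time this
 * INO is seen.
 */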
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *handler_data;
        unsigned int irq;
        int ino;

        BUG_ON(tlb_type == hypervisor);

        ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
        bucket = &ivector_table[ino];
        irq = bucket_get_irq(__pa(bucket));
        if (!irq) {
                irq = irq_alloc(0, ino);
                bucket_set_irq(__pa(bucket), irq);
                irq_set_chip_and_handler_name(irq, &sun4u_irq,
                                              handle_fasteoi_irq, "IVEC");
        }

        handler_data = irq_get_handler_data(irq);
        if (unlikely(handler_data))
                goto out;

        handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!handler_data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        irq_set_handler_data(irq, handler_data);

        handler_data->imap = imap;
        handler_data->iclr = iclr;

out:
        return irq;
}

static unsigned int sun4v_build_common(unsigned long sysino,
                                       struct irq_chip *chip)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *handler_data;
        unsigned int irq;

        BUG_ON(tlb_type != hypervisor);

        bucket = &ivector_table[sysino];
        irq = bucket_get_irq(__pa(bucket));
        if (!irq) {
                irq = irq_alloc(0, sysino);
                bucket_set_irq(__pa(bucket), irq);
                irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
                                              "IVEC");
        }

        handler_data = irq_get_handler_data(irq);
        if (unlikely(handler_data))
                goto out;

        handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!handler_data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        irq_set_handler_data(irq, handler_data);

        /* Catch accidental accesses to these things.  IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        handler_data->imap = ~0UL;
        handler_data->iclr = ~0UL;

out:
        return irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
        unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

        return sun4v_build_common(sysino, &sun4v_irq);
}

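/* Build a cookie-based virtual interrupt for a (dev_handle, dev_ino)
 * pair.  Unlike the fixed sysino case, the ino_bucket is allocated
 * dynamically here and its (negated) physical address is registered
 * with the hypervisor as the cookie via sun4v_vintr_set_cookie().
 */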
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
        struct irq_handler_data *handler_data;
        unsigned long hv_err, cookie;
        struct ino_bucket *bucket;
        unsigned int irq;

        bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
        if (unlikely(!bucket))
                return 0;

        /* The only reference we store to the IRQ bucket is
         * by physical address which kmemleak can't see, tell
         * it that this object explicitly is not a leak and
         * should be scanned.
         */
        kmemleak_not_leak(bucket);

        __flush_dcache_range((unsigned long) bucket,
                             ((unsigned long) bucket +
                              sizeof(struct ino_bucket)));

        irq = irq_alloc(devhandle, devino);
        bucket_set_irq(__pa(bucket), irq);

        irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
                                      "IVEC");

        handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!handler_data))
                return 0;

        /* In order to make the LDC channel startup sequence easier,
         * especially wrt. locking, we do not let request_irq() enable
         * the interrupt.
         */
        irq_set_status_flags(irq, IRQ_NOAUTOEN);
        irq_set_handler_data(irq, handler_data);

        /* Catch accidental accesses to these things.  IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        handler_data->imap = ~0UL;
        handler_data->iclr = ~0UL;

        cookie = ~__pa(bucket);
        hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
        if (hv_err) {
                prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
                            "err=%lu\n", devhandle, devino, hv_err);
                prom_halt();
        }

        return irq;
}

void ack_bad_irq(unsigned int irq)
{
        unsigned int ino = irq_table[irq].dev_ino;

        if (!ino)
                ino = 0xdeadbeef;

        printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
               ino, irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

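/* Entry point for device interrupts.  The trap-level vector code
 * queues pending ino_buckets on a per-cpu worklist linked by
 * physical address; we atomically grab the whole chain with
 * interrupts disabled and then handle each irq on the hardirq stack.
 */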
void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
        unsigned long pstate, bucket_pa;
        struct pt_regs *old_regs;
        void *orig_sp;

        clear_softint(1 << pil);

        old_regs = set_irq_regs(regs);
        irq_enter();

        /* Grab an atomic snapshot of the pending IVECs.  */
        __asm__ __volatile__("rdpr %%pstate, %0\n\t"
                             "wrpr %0, %3, %%pstate\n\t"
                             "ldx [%2], %1\n\t"
                             "stx %%g0, [%2]\n\t"
                             "wrpr %0, 0x0, %%pstate\n\t"
                             : "=&r" (pstate), "=&r" (bucket_pa)
                             : "r" (irq_work_pa(smp_processor_id())),
                               "i" (PSTATE_IE)
                             : "memory");

        orig_sp = set_hardirq_stack();

        while (bucket_pa) {
                unsigned long next_pa;
                unsigned int irq;

                next_pa = bucket_get_chain_pa(bucket_pa);
                irq = bucket_get_irq(bucket_pa);
                bucket_clear_chain_pa(bucket_pa);

                generic_handle_irq(irq);

                bucket_pa = next_pa;
        }

        restore_hardirq_stack(orig_sp);

        irq_exit();
        set_irq_regs(old_regs);
}

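/* Run softirqs on the separate per-cpu softirq stack: the inline
 * assembly swaps %sp to the top of that stack (leaving room for the
 * 192-byte register save area and honoring STACK_BIAS) around the
 * call to __do_softirq().
 */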
void do_softirq(void)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                void *orig_sp, *sp = softirq_stack[smp_processor_id()];

                sp += THREAD_SIZE - 192 - STACK_BIAS;

                __asm__ __volatile__("mov %%sp, %0\n\t"
                                     "mov %1, %%sp"
                                     : "=&r" (orig_sp)
                                     : "r" (sp));
                __do_softirq();
                __asm__ __volatile__("mov %0, %%sp"
                                     : : "r" (orig_sp));
        }

        local_irq_restore(flags);
}

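/* Called during cpu hot-unplug.  Every irq that has an action and is
 * not per-cpu gets its affinity replayed through the chip's
 * ->irq_set_affinity() so an online target cpu is re-selected; the
 * local timer interrupt is then shut off.
 */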
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
        unsigned int irq;

        for (irq = 0; irq < NR_IRQS; irq++) {
                struct irq_desc *desc = irq_to_desc(irq);
                struct irq_data *data = irq_desc_get_irq_data(desc);
                unsigned long flags;

                raw_spin_lock_irqsave(&desc->lock, flags);
                if (desc->action && !irqd_is_per_cpu(data)) {
                        if (data->chip->irq_set_affinity)
                                data->chip->irq_set_affinity(data,
                                                             data->affinity,
                                                             false);
                }
                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }

        tick_ops->disable_irq();
}
#endif

struct sun5_timer {
        u64     count0;
        u64     limit0;
        u64     count1;
        u64     limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
        struct device_node *dp;
        const unsigned int *addr;

        /* PROM timer node hangs out in the top level of device siblings... */
        dp = of_find_node_by_path("/");
        dp = dp->child;
        while (dp) {
                if (!strcmp(dp->name, "counter-timer"))
                        break;
                dp = dp->sibling;
        }

        /* Assume if node is not present, PROM uses different tick mechanism
         * which we should not care about.
         */
        if (!dp) {
                prom_timers = (struct sun5_timer *) 0;
                return;
        }

        /* If the PROM is really using this, it must have mapped it. */
        addr = of_get_property(dp, "address", NULL);
        if (!addr) {
                prom_printf("PROM does not have timer mapped, trying to continue.\n");
                prom_timers = (struct sun5_timer *) 0;
                return;
        }
        prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
        if (!prom_timers)
                return;

        /* Save them away for later. */
        prom_limit0 = prom_timers->limit0;
        prom_limit1 = prom_timers->limit1;

        /* Just as on sun4c/sun4m, the PROM uses a timer which ticks
         * at IRQ 14.  We turn both off here just to be paranoid.
         */
        prom_timers->limit0 = 0;
        prom_timers->limit1 = 0;

        /* Wheee, eat the interrupt packet too... */
        __asm__ __volatile__(
"       mov     0x40, %%g2\n"
"       ldxa    [%%g0] %0, %%g1\n"
"       ldxa    [%%g2] %1, %%g1\n"
"       stxa    %%g0, [%%g0] %0\n"
"       membar  #Sync\n"
        : /* no outputs */
        : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
        : "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
        int cpu = hard_smp_processor_id();

        trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
        unsigned long num_entries = (qmask + 1) / 64;
        unsigned long status;

        status = sun4v_cpu_qconf(type, paddr, num_entries);
        if (status != HV_EOK) {
                prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
                            "err %lu\n", type, paddr, num_entries, status);
                prom_halt();
        }
}

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
        struct trap_per_cpu *tb = &trap_block[this_cpu];

        register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
                           tb->cpu_mondo_qmask);
        register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
                           tb->dev_mondo_qmask);
        register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
                           tb->resum_qmask);
        register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
                           tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
        unsigned long size = PAGE_ALIGN(qmask + 1);
        unsigned long order = get_order(size);
        unsigned long p;

        p = __get_free_pages(GFP_KERNEL, order);
        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate queue.\n");
                prom_halt();
        }

        *pa_ptr = __pa(p);
}

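/* A single zeroed page holds both the cpu mondo block (its first 64
 * bytes) and the cpu list used for cross-call delivery, which is
 * what the BUILD_BUG_ON below guarantees room for.
 */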
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
        unsigned long page;

        BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

        page = get_zeroed_page(GFP_KERNEL);
        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
                prom_halt();
        }

        tb->cpu_mondo_block_pa = __pa(page);
        tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct trap_per_cpu *tb = &trap_block[cpu];

                alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
                alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
                alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
                alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
                alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
                alloc_one_queue(&tb->nonresum_kernel_buf_pa,
                                tb->nonresum_qmask);
        }
}

static void __init init_send_mondo_info(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct trap_per_cpu *tb = &trap_block[cpu];

                init_cpu_send_mondo_info(tb);
        }
}

static struct irqaction timer_irq_action = {
        .name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
        unsigned long size;

        map_prom_timers();
        kill_prom_timer();

        size = sizeof(struct ino_bucket) * NUM_IVECS;
        ivector_table = kzalloc(size, GFP_KERNEL);
        if (!ivector_table) {
                prom_printf("Fatal error, cannot allocate ivector_table\n");
                prom_halt();
        }
        __flush_dcache_range((unsigned long) ivector_table,
                             ((unsigned long) ivector_table) + size);

        ivector_table_pa = __pa(ivector_table);

        if (tlb_type == hypervisor)
                sun4v_init_mondo_queues();

        init_send_mondo_info();

        if (tlb_type == hypervisor) {
                /* Load up the boot cpu's entries.  */
                sun4v_register_mondo_queues(hard_smp_processor_id());
        }

        /* We need to clear any IRQ's pending in the soft interrupt
         * registers, a spurious one could be left around from the
         * PROM timer which we just disabled.
         */
        clear_softint(get_softint());

        /* Now that ivector table is initialized, it is safe
         * to receive IRQ vector traps.  We will normally take
         * one or two right now, in case some device PROM used
         * to boot us wants to speak to us.  We just ignore them.
         */
        __asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
                             "or %%g1, %0, %%g1\n\t"
                             "wrpr %%g1, 0x0, %%pstate"
                             : /* No outputs */
                             : "i" (PSTATE_IE)
                             : "g1");

        irq_to_desc(0)->action = &timer_irq_action;
}