/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
	__asm__ __volatile__("stwa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} irq_table[NR_IRQS];
static DEFINE_SPINLOCK(irq_alloc_lock);
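/* Allocate a virtual IRQ slot and bind it to the given device
 * handle/INO pair.  Slot 0 is reserved as the "no IRQ" marker,
 * which is also why allocation failure returns 0, and the unsigned
 * char return type caps the table at 256 entries.
 */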
unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		irq_table[ent].dev_handle = dev_handle;
		irq_table[ent].dev_ino = dev_ino;
		irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&irq_alloc_lock, flags);

	return ent;
}

#ifdef CONFIG_PCI_MSI
void irq_free(unsigned int irq)
{
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&irq_alloc_lock, flags);

	irq_table[irq].in_use = 0;

	spin_unlock_irqrestore(&irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "NMI: ");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
	seq_printf(p, "     Non-maskable interrupts\n");
	return 0;
}
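/* Compute the interrupt target ID field for a sun4u IMAP register.
 * The encoding depends on the system bus: Starfire needs a firmware
 * translation of the cpuid, JBUS (Jalapeno/Serrano) and UPA take the
 * cpuid directly, and Safari splits it into agent and node ID fields.
 */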
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

#ifdef CONFIG_SMP
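/* Pick the cpu an interrupt should be directed at.  If the affinity
 * mask is just "all online cpus" we defer to the cpumap layer's
 * distribution, otherwise we take the first online cpu in the mask,
 * falling back to the cpumap choice if none of them are online.
 */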
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpumask_equal(&mask, cpu_online_mask)) {
		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;

		cpumask_and(&tmp, cpu_online_mask, &mask);
		cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(irq, affinity) \
	real_hard_smp_processor_id()
#endif

static void sun4u_irq_enable(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, data->affinity);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}
}

static int sun4u_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, mask);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}

	return 0;
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(struct irq_data *data)
{
}

static void sun4u_irq_eoi(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data))
		upa_writeq(ICLR_IDLE, handler_data->iclr);
}

static void sun4v_irq_enable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(struct irq_data *data)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, data->affinity);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(struct irq_data *data,
				   const struct cpumask *mask, bool force)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, mask);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.name			= "sun4u",
	.irq_enable		= sun4u_irq_enable,
	.irq_disable		= sun4u_irq_disable,
	.irq_eoi		= sun4u_irq_eoi,
	.irq_set_affinity	= sun4u_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_irq = {
	.name			= "sun4v",
	.irq_enable		= sun4v_irq_enable,
	.irq_disable		= sun4v_irq_disable,
	.irq_eoi		= sun4v_irq_eoi,
	.irq_set_affinity	= sun4v_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_virq = {
	.name			= "vsun4v",
	.irq_enable		= sun4v_virq_enable,
	.irq_disable		= sun4v_virq_disable,
	.irq_eoi		= sun4v_virq_eoi,
	.irq_set_affinity	= sun4v_virt_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static void pre_flow_handler(struct irq_data *d)
{
	struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
	unsigned int ino = irq_table[d->irq].dev_ino;

	handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
}

void irq_install_pre_handler(int irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *handler_data = irq_get_handler_data(irq);

	handler_data->pre_handler = func;
	handler_data->arg1 = arg1;
	handler_data->arg2 = arg2;

	__irq_set_preflow_handler(irq, pre_flow_handler);
}

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, ino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, &sun4u_irq,
					      handle_fasteoi_irq, "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	handler_data->imap = imap;
	handler_data->iclr = iclr;

out:
	return irq;
}

static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, sysino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
					      "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

out:
	return irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

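/* Cookie-based virtual interrupts get a private, dynamically
 * allocated ino_bucket rather than a slot in ivector_table.  The
 * cookie registered with the hypervisor is the complemented physical
 * address of that bucket, so the vector trap code can recover the
 * bucket at delivery time and chain it onto the per-cpu worklist.
 */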
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *handler_data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	unsigned int irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* The only reference we store to the IRQ bucket is
	 * by physical address which kmemleak can't see, tell
	 * it that this object explicitly is not a leak and
	 * should be scanned.
	 */
	kmemleak_not_leak(bucket);

	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	irq = irq_alloc(devhandle, devino);
	bucket_set_irq(__pa(bucket), irq);

	irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
				      "IVEC");

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);
	irq_set_handler_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return irq;
}

void ack_bad_irq(unsigned int irq)
{
	unsigned int ino = irq_table[irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
	       ino, irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

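/* First-level device interrupt dispatch.  We atomically snapshot and
 * clear this cpu's pending IVEC worklist (with interrupts disabled
 * via %pstate so the vector trap cannot race with us), switch onto
 * the dedicated hardirq stack, then walk the chain of buckets handing
 * each IRQ to the generic layer.
 */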
void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << pil);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %3, %%pstate\n\t"
			     "ldx [%2], %1\n\t"
			     "stx %%g0, [%2]\n\t"
			     "wrpr %0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		unsigned long next_pa;
		unsigned int irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		irq = bucket_get_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		generic_handle_irq(irq);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

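/* Run pending softirqs on the per-cpu softirq stack, splicing %sp
 * over by hand around the __do_softirq() call.  The 192 byte offset
 * reserves space for the top-most stack frame, and STACK_BIAS applies
 * the sparc64 stack pointer bias.
 */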
void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_HOTPLUG_CPU
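/* A cpu is going offline: re-run the affinity setter for every
 * non-per-cpu interrupt that has an action, so irq_choose_cpu()
 * retargets it to a cpu that is staying online, then shut off the
 * local timer interrupt.
 */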
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		unsigned long flags;

		raw_spin_lock_irqsave(&desc->lock, flags);
		if (desc->action && !irqd_is_per_cpu(data)) {
			if (data->chip->irq_set_affinity)
				data->chip->irq_set_affinity(data,
							     data->affinity,
							     false);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = kzalloc(size, GFP_KERNEL);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_to_desc(0)->action = &timer_irq_action;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700983}