/*
 * arch/powerpc/sysdev/uic.c
 *
 * IBM PowerPC 4xx Universal Interrupt Controller
 *
 * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/sysdev.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/dcr.h>

#define NR_UIC_INTS 32

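/*
 * UIC register offsets.  These are DCR numbers relative to the base
 * given by the first cell of the device tree "dcr-reg" property (see
 * uic_init_one() below): SR is the status register, ER the enable
 * register, CR the critical register, PR and TR the per-source
 * polarity and trigger registers, MSR the masked status register, and
 * VR/VCR the vector registers.  Note that the UIC numbers its sources
 * in IBM bit order (source 0 is the most significant bit), which is
 * why the code below builds masks as "1 << (31 - src)".
 */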
#define UIC_SR 0x0
#define UIC_ER 0x2
#define UIC_CR 0x3
#define UIC_PR 0x4
#define UIC_TR 0x5
#define UIC_MSR 0x6
#define UIC_VR 0x7
#define UIC_VCR 0x8

#define uic_irq_to_hw(virq) (irq_map[virq].hwirq)

struct uic *primary_uic;

struct uic {
	int index;
	int dcrbase;

	spinlock_t lock;

	/* The remapper for this UIC */
	struct irq_host *irqhost;

	/* For secondary UICs, the cascade interrupt's irqaction */
	struct irqaction cascade;
};

static void uic_unmask_irq(unsigned int virq)
{
	struct uic *uic = get_irq_chip_data(virq);
	unsigned int src = uic_irq_to_hw(virq);
	unsigned long flags;
	u32 er;

	spin_lock_irqsave(&uic->lock, flags);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er |= 1 << (31 - src);
	mtdcr(uic->dcrbase + UIC_ER, er);
	spin_unlock_irqrestore(&uic->lock, flags);
}

static void uic_mask_irq(unsigned int virq)
{
	struct uic *uic = get_irq_chip_data(virq);
	unsigned int src = uic_irq_to_hw(virq);
	unsigned long flags;
	u32 er;

	spin_lock_irqsave(&uic->lock, flags);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er &= ~(1 << (31 - src));
	mtdcr(uic->dcrbase + UIC_ER, er);
	spin_unlock_irqrestore(&uic->lock, flags);
}

static void uic_ack_irq(unsigned int virq)
{
	struct uic *uic = get_irq_chip_data(virq);
	unsigned int src = uic_irq_to_hw(virq);
	unsigned long flags;

	spin_lock_irqsave(&uic->lock, flags);
	mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src));
	spin_unlock_irqrestore(&uic->lock, flags);
}
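
/*
 * Combined mask-and-ack hook for uic_irq_chip below.  The original
 * file names this function but leaves the hook commented out, while
 * handle_uic_irq() invokes desc->chip->mask_ack() for edge interrupts,
 * so an implementation is needed.  This is a minimal sketch that
 * simply performs uic_mask_irq() and uic_ack_irq() under a single
 * lock acquisition.
 */
static void uic_mask_irq_and_ack(unsigned int virq)
{
	struct uic *uic = get_irq_chip_data(virq);
	unsigned int src = uic_irq_to_hw(virq);
	unsigned long flags;
	u32 er, sr;

	sr = 1 << (31 - src);
	spin_lock_irqsave(&uic->lock, flags);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er &= ~sr;
	mtdcr(uic->dcrbase + UIC_ER, er);
	mtdcr(uic->dcrbase + UIC_SR, sr);
	spin_unlock_irqrestore(&uic->lock, flags);
}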

static int uic_set_irq_type(unsigned int virq, unsigned int flow_type)
{
	struct uic *uic = get_irq_chip_data(virq);
	unsigned int src = uic_irq_to_hw(virq);
	struct irq_desc *desc = get_irq_desc(virq);
	unsigned long flags;
	int trigger, polarity;
	u32 tr, pr, mask;

	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_NONE:
		uic_mask_irq(virq);
		return 0;

	case IRQ_TYPE_EDGE_RISING:
		trigger = 1; polarity = 1;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		trigger = 1; polarity = 0;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		trigger = 0; polarity = 1;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		trigger = 0; polarity = 0;
		break;
	default:
		return -EINVAL;
	}

	mask = ~(1 << (31 - src));

	spin_lock_irqsave(&uic->lock, flags);
	tr = mfdcr(uic->dcrbase + UIC_TR);
	pr = mfdcr(uic->dcrbase + UIC_PR);
	tr = (tr & mask) | (trigger << (31-src));
	pr = (pr & mask) | (polarity << (31-src));

	mtdcr(uic->dcrbase + UIC_PR, pr);
	mtdcr(uic->dcrbase + UIC_TR, tr);

	desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
	desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
	if (!trigger)
		desc->status |= IRQ_LEVEL;

	spin_unlock_irqrestore(&uic->lock, flags);

	return 0;
}

static struct irq_chip uic_irq_chip = {
	.typename = " UIC ",
	.unmask = uic_unmask_irq,
	.mask = uic_mask_irq,
	.mask_ack = uic_mask_irq_and_ack,
	.ack = uic_ack_irq,
	.set_type = uic_set_irq_type,
};

/**
 * handle_uic_irq - irq flow handler for UIC
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * This is a modified version of the generic handle_level_irq() suitable
 * for the UIC. On the UIC, acking (i.e. clearing the SR bit) a level
 * irq will have no effect if the interrupt is still asserted by the
 * device, even if the interrupt is already masked. Therefore, unlike
 * the standard handle_level_irq(), we must ack the interrupt *after*
 * invoking the ISR (which should have de-asserted the interrupt in
 * the external source). For edge interrupts we ack at the beginning
 * instead of the end, to keep the window in which we can miss an
 * interrupt as small as possible.
 */
void fastcall handle_uic_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned int cpu = smp_processor_id();
	struct irqaction *action;
	irqreturn_t action_ret;

	spin_lock(&desc->lock);
	if (desc->status & IRQ_LEVEL)
		desc->chip->mask(irq);
	else
		desc->chip->mask_ack(irq);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out_unlock;
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_cpu(cpu).irqs[irq]++;

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
		desc->status |= IRQ_PENDING;
		goto out_unlock;
	}

	desc->status |= IRQ_INPROGRESS;
	desc->status &= ~IRQ_PENDING;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
	if (desc->status & IRQ_LEVEL)
		desc->chip->ack(irq);
	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
		desc->chip->unmask(irq);
out_unlock:
	spin_unlock(&desc->lock);
}

static int uic_host_match(struct irq_host *h, struct device_node *node)
{
	return h->of_node == node;
}

static int uic_host_map(struct irq_host *h, unsigned int virq,
			irq_hw_number_t hw)
{
	struct uic *uic = h->host_data;

	set_irq_chip_data(virq, uic);
	/* handle_uic_irq() copes with both level and edge interrupts;
	 * see its comment above for the ack ordering in each case. */
	set_irq_chip_and_handler(virq, &uic_irq_chip, handle_uic_irq);

	/* Set default irq type */
	set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

static int uic_host_xlate(struct irq_host *h, struct device_node *ct,
			u32 *intspec, unsigned int intsize,
			irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	/* UIC intspecs must have 2 cells */
	BUG_ON(intsize != 2);
	*out_hwirq = intspec[0];
	*out_type = intspec[1];
	return 0;
}
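
/*
 * For reference, a hypothetical (illustrative only) device tree
 * fragment matching what uic_host_xlate() above and uic_init_one()
 * below expect: a node compatible with "ibm,uic", a one-cell
 * "cell-index", a two-cell "dcr-reg", and two-cell interrupt
 * specifiers of the form <source sense>.  Secondary UICs additionally
 * carry an "interrupts" property describing their cascade input on
 * the parent UIC.
 *
 *	UIC0: interrupt-controller {
 *		compatible = "ibm,uic";
 *		interrupt-controller;
 *		cell-index = <0>;
 *		dcr-reg = <0xc0 0x9>;
 *		#interrupt-cells = <2>;
 *	};
 */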

static struct irq_host_ops uic_host_ops = {
	.match = uic_host_match,
	.map = uic_host_map,
	.xlate = uic_host_xlate,
};

irqreturn_t uic_cascade(int virq, void *data)
{
	struct uic *uic = data;
	u32 msr;
	int src;
	int subvirq;

	msr = mfdcr(uic->dcrbase + UIC_MSR);
	if (!msr) /* spurious interrupt */
		return IRQ_HANDLED;

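	/* MSR is in IBM bit order (bit 0 == MSB), so 32 - ffs() converts
	 * the lowest-order pending bit back to a source number */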
	src = 32 - ffs(msr);

	subvirq = irq_linear_revmap(uic->irqhost, src);
	generic_handle_irq(subvirq);

	return IRQ_HANDLED;
}

static struct uic * __init uic_init_one(struct device_node *node)
{
	struct uic *uic;
	const u32 *indexp, *dcrreg;
	int len;

	BUG_ON(! of_device_is_compatible(node, "ibm,uic"));

	uic = alloc_bootmem(sizeof(*uic));
	if (! uic)
		return NULL; /* FIXME: panic? */

	memset(uic, 0, sizeof(*uic));
	spin_lock_init(&uic->lock);
	indexp = of_get_property(node, "cell-index", &len);
	if (!indexp || (len != sizeof(u32))) {
		printk(KERN_ERR "uic: Device node %s has missing or invalid "
		       "cell-index property\n", node->full_name);
		return NULL;
	}
	uic->index = *indexp;

	dcrreg = of_get_property(node, "dcr-reg", &len);
	if (!dcrreg || (len != 2*sizeof(u32))) {
		printk(KERN_ERR "uic: Device node %s has missing or invalid "
		       "dcr-reg property\n", node->full_name);
		return NULL;
	}
	uic->dcrbase = *dcrreg;

	uic->irqhost = irq_alloc_host(of_node_get(node), IRQ_HOST_MAP_LINEAR,
				      NR_UIC_INTS, &uic_host_ops, -1);
	if (! uic->irqhost) {
		of_node_put(node);
		return NULL; /* FIXME: panic? */
	}

	uic->irqhost->host_data = uic;

	/* Start with all interrupts disabled, level and non-critical */
	mtdcr(uic->dcrbase + UIC_ER, 0);
	mtdcr(uic->dcrbase + UIC_CR, 0);
	mtdcr(uic->dcrbase + UIC_TR, 0);
	/* Clear any pending interrupts, in case the firmware left some */
	mtdcr(uic->dcrbase + UIC_SR, 0xffffffff);

	printk ("UIC%d (%d IRQ sources) at DCR 0x%x\n", uic->index,
		NR_UIC_INTS, uic->dcrbase);

	return uic;
}

void __init uic_init_tree(void)
{
	struct device_node *np;
	struct uic *uic;
	const u32 *interrupts;

	/* First locate and initialize the top-level UIC */

	np = of_find_compatible_node(NULL, NULL, "ibm,uic");
	while (np) {
		interrupts = of_get_property(np, "interrupts", NULL);
		if (! interrupts)
			break;

		np = of_find_compatible_node(np, NULL, "ibm,uic");
	}

	BUG_ON(!np); /* uic_init_tree() assumes there's a UIC as the
		      * top-level interrupt controller */
	primary_uic = uic_init_one(np);
	if (! primary_uic)
		panic("Unable to initialize primary UIC %s\n", np->full_name);

	irq_set_default_host(primary_uic->irqhost);
	of_node_put(np);

	/* Then scan again for cascaded UICs */
	np = of_find_compatible_node(NULL, NULL, "ibm,uic");
	while (np) {
		interrupts = of_get_property(np, "interrupts", NULL);
		if (interrupts) {
			/* Secondary UIC */
			int cascade_virq;
			int ret;

			uic = uic_init_one(np);
			if (! uic)
				panic("Unable to initialize a secondary UIC %s\n",
				      np->full_name);

			cascade_virq = irq_of_parse_and_map(np, 0);

			uic->cascade.handler = uic_cascade;
			uic->cascade.name = "UIC cascade";
			uic->cascade.dev_id = uic;

			ret = setup_irq(cascade_virq, &uic->cascade);
			if (ret)
				printk(KERN_ERR "Failed to setup_irq(%d) for "
				       "UIC%d cascade\n", cascade_virq,
				       uic->index);

			/* FIXME: setup critical cascade?? */
		}

		np = of_find_compatible_node(np, NULL, "ibm,uic");
	}
}

/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int uic_get_irq(void)
{
	u32 msr;
	int src;

	BUG_ON(! primary_uic);

	msr = mfdcr(primary_uic->dcrbase + UIC_MSR);
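	/* With no interrupt pending, MSR is 0, ffs() returns 0 and src
	 * becomes 32, which has no linear mapping, so the revmap below
	 * yields NO_IRQ. */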
	src = 32 - ffs(msr);

	return irq_linear_revmap(primary_uic->irqhost, src);
399}