/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>

#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif

/* UPA nodes send an interrupt packet to the UltraSparc with the low 5 bits
 * (7 on Starfire) of the first data register holding the IRQ identifier
 * being delivered.  We must translate this into a non-vector IRQ so we
 * can set the softint on this cpu.
 *
 * To make processing these packets efficient and race-free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically; the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 */
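/* Consumption happens in handler_irq() below; conceptually (a sketch of
 * the pattern, not a verbatim excerpt):
 *
 *	bp = __bucket(xchg32(irq_work(cpu, pil), 0));
 *	while (bp) {
 *		struct ino_bucket *nbp = __bucket(bp->irq_chain);
 *		bp->irq_chain = 0;
 *		process_bucket(pil, bp, regs);
 *		bp = nbp;
 *	}
 */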

struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

/* This has to be in the main kernel image; it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
struct irq_work_struct {
	unsigned int	irq_worklists[16];
};
struct irq_work_struct __irq_work[NR_CPUS];
#define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])

static struct irqaction *irq_action[NR_IRQS+1];

/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
 * read things in the table.  IRQ handler processing orders
 * its accesses such that no locking is needed.
 */
static DEFINE_SPINLOCK(irq_action_lock);

static void register_irq_proc (unsigned int irq);

/*
 * The upper 16 bits of irqaction->flags hold the ino.
 * irqaction->mask holds the smp affinity information.
 */
#define put_ino_in_irqaction(action, irq) \
	action->flags &= 0xffffffffffffUL; \
	if (__bucket(irq) == &pil0_dummy_bucket) \
		action->flags |= 0xdeadUL << 48;  \
	else \
		action->flags |= __irq_ino(irq) << 48;
#define get_ino_in_irqaction(action)	(action->flags >> 48)

#define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
#define get_smpaff_in_irqaction(action) 	((action)->mask)
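/* So, for example, an action registered for INO 0x1c ends up with flags of
 * the form 0x001cxxxxxxxxxxxx: the INO in bits 63:48, the SA_* flag bits
 * preserved below (illustrative value, not taken from a real machine).
 */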

int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags;
	int i = *(loff_t *) v;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i <= NR_IRQS) {
		if (!(action = *(i + irq_action)))
			goto out_unlock;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++) {
			if (!cpu_online(j))
				continue;
			seq_printf(p, "%10u ",
				   kstat_cpu(j).irqs[i]);
		}
#endif
		seq_printf(p, " %s:%lx", action->name,
			   get_ino_in_irqaction(action));
		for (action = action->next; action; action = action->next) {
			seq_printf(p, ", %s:%lx", action->name,
				   get_ino_in_irqaction(action));
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);

	return 0;
}

/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;

	imap = bucket->imap;
	if (imap == 0UL)
		return;

	preempt_disable();

	if (tlb_type == hypervisor) {
		unsigned int ino = __irq_ino(irq);
		int cpu = hard_smp_processor_id();
		int err;

		err = sun4v_intr_settarget(ino, cpu);
		if (err != HV_EOK)
			printk("sun4v_intr_settarget(%x,%d): err(%d)\n",
			       ino, cpu, err);
		err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
		if (err != HV_EOK)
			printk("sun4v_intr_setenabled(%x): err(%d)\n",
			       ino, err);
		err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk("sun4v_intr_setstate(%x): "
			       "err(%d)\n", ino, err);
	} else {
		unsigned long tid;

		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32) == __JALAPENO_ID ||
			    (ver >> 32) == __SERRANO_ID) {
				/* We set it to our JBUS ID. */
				__asm__ __volatile__("ldxa [%%g0] %1, %0"
						     : "=r" (tid)
						     : "i" (ASI_JBUS_CONFIG));
				tid = ((tid & (0x1fUL<<17)) << 9);
				tid &= IMAP_TID_JBUS;
			} else {
				/* We set it to our Safari AID. */
				__asm__ __volatile__("ldxa [%%g0] %1, %0"
						     : "=r" (tid)
						     : "i" (ASI_SAFARI_CONFIG));
				tid = ((tid & (0x3ffUL<<17)) << 9);
				tid &= IMAP_AID_SAFARI;
			}
		} else if (this_is_starfire == 0) {
			/* We set it to our UPA MID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_UPA_CONFIG));
			tid = ((tid & UPA_CONFIG_MID) << 9);
			tid &= IMAP_TID_UPA;
		} else {
			tid = (starfire_translate(imap,
						  smp_processor_id()) << 26);
			tid &= IMAP_TID_UPA;
		}

		/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
		 * of this SYSIO's preconfigured IGN in the SYSIO Control
		 * Register, the hardware just mirrors that value here.
		 * However for Graphics and UPA Slave devices the full
		 * IMAP_INR field can be set by the programmer here.
		 *
		 * Things like FFB can now be handled via the new IRQ
		 * mechanism.
		 */
		upa_writel(tid | IMAP_VALID, imap);
	}

	preempt_enable();
}
220
221/* This now gets passed true ino's as well. */
222void disable_irq(unsigned int irq)
223{
224 struct ino_bucket *bucket = __bucket(irq);
225 unsigned long imap;
226
227 imap = bucket->imap;
228 if (imap != 0UL) {
David S. Miller10951ee2006-02-13 18:22:57 -0800229 if (tlb_type == hypervisor) {
David S. Miller4bf447d2006-02-13 22:37:32 -0800230 unsigned int ino = __irq_ino(irq);
David S. Millerc4bea282006-02-13 22:56:27 -0800231 int err;
David S. Miller4bf447d2006-02-13 22:37:32 -0800232
David S. Millerc4bea282006-02-13 22:56:27 -0800233 err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
234 if (err != HV_EOK)
235 printk("sun4v_intr_setenabled(%x): "
236 "err(%d)\n", ino, err);
David S. Miller10951ee2006-02-13 18:22:57 -0800237 } else {
238 u32 tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239
David S. Miller10951ee2006-02-13 18:22:57 -0800240 /* NOTE: We do not want to futz with the IRQ clear registers
241 * and move the state to IDLE, the SCSI code does call
242 * disable_irq() to assure atomicity in the queue cmd
243 * SCSI adapter driver code. Thus we'd lose interrupts.
244 */
245 tmp = upa_readl(imap);
246 tmp &= ~IMAP_VALID;
247 upa_writel(tmp, imap);
248 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249 }
250}

/* The timer is the one "weird" interrupt which is generated by
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
static struct irq_desc pil0_dummy_desc;
static struct ino_bucket pil0_dummy_bucket = {
	.irq_info = &pil0_dummy_desc,
};

static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup,
			    unsigned long iclr, unsigned long imap,
			    struct ino_bucket *bucket)
{
	prom_printf("IRQ: INO %04x (%d:%016lx:%016lx) --> "
		    "(%d:%d:%016lx:%016lx), halting...\n",
		    ino, bucket->pil, bucket->iclr, bucket->imap,
		    pil, inofixup, iclr, imap);
	prom_halt();
}
271
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
273{
274 struct ino_bucket *bucket;
275 int ino;
276
277 if (pil == 0) {
278 if (iclr != 0UL || imap != 0UL) {
279 prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
280 iclr, imap);
281 prom_halt();
282 }
283 return __irq(&pil0_dummy_bucket);
284 }
285
David S. Miller10951ee2006-02-13 18:22:57 -0800286 BUG_ON(tlb_type == hypervisor);
287
Linus Torvalds1da177e2005-04-16 15:20:36 -0700288 /* RULE: Both must be specified in all other cases. */
289 if (iclr == 0UL || imap == 0UL) {
290 prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
291 pil, inofixup, iclr, imap);
292 prom_halt();
293 }
294
295 ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
296 if (ino > NUM_IVECS) {
297 prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
298 ino, pil, inofixup, iclr, imap);
299 prom_halt();
300 }
301
David S. Miller088dd1f2005-07-04 13:24:38 -0700302 bucket = &ivector_table[ino];
303 if (bucket->flags & IBF_ACTIVE)
304 build_irq_error("IRQ: Trying to build active INO bucket.\n",
305 ino, pil, inofixup, iclr, imap, bucket);
306
307 if (bucket->irq_info) {
308 if (bucket->imap != imap || bucket->iclr != iclr)
309 build_irq_error("IRQ: Trying to reinit INO bucket.\n",
310 ino, pil, inofixup, iclr, imap, bucket);
311
312 goto out;
313 }
314
315 bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC);
316 if (!bucket->irq_info) {
317 prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
318 prom_halt();
319 }
320 memset(bucket->irq_info, 0, sizeof(struct irq_desc));
321
Linus Torvalds1da177e2005-04-16 15:20:36 -0700322 /* Ok, looks good, set it up. Don't touch the irq_chain or
323 * the pending flag.
324 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700325 bucket->imap = imap;
326 bucket->iclr = iclr;
327 bucket->pil = pil;
328 bucket->flags = 0;
329
David S. Miller088dd1f2005-07-04 13:24:38 -0700330out:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700331 return __irq(bucket);
332}
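/* On sun4u, bus controller probe code computes the iclr/imap register
 * addresses for a device and maps them to a virtual IRQ roughly like
 * (a sketch, values hypothetical):
 *
 *	irq = build_irq(pil, inofixup, iclr, imap);
 */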

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsigned char flags)
{
	struct ino_bucket *bucket;
	unsigned long sysino;

	sysino = sun4v_devino_to_sysino(devhandle, devino);

	bucket = &ivector_table[sysino];

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 *
	 * But we need to make them look unique for the disable_irq() logic
	 * in free_irq().
	 */
	bucket->imap = ~0UL - sysino;
	bucket->iclr = ~0UL - sysino;

	bucket->pil = pil;
	bucket->flags = flags;

	bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC);
	if (!bucket->irq_info) {
		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
		prom_halt();
	}
	memset(bucket->irq_info, 0, sizeof(struct irq_desc));

	return __irq(bucket);
}
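/* A sun4v bus driver, having read devhandle/devino from the firmware
 * device tree, would map them with something like (sketch):
 *
 *	irq = sun4v_build_irq(devhandle, devino, pil, 0);
 */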

static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}

static int check_irq_sharing(int pil, unsigned long irqflags)
{
	struct irqaction *action, *tmp;

	action = *(irq_action + pil);
	if (action) {
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		} else {
			return -EBUSY;
		}
	}
	return 0;
}

static void append_irq_action(int pil, struct irqaction *action)
{
	struct irqaction **pp = irq_action + pil;

	while (*pp)
		pp = &((*pp)->next);
	*pp = action;
}

static struct irqaction *get_action_slot(struct ino_bucket *bucket)
{
	struct irq_desc *desc = bucket->irq_info;
	int max_irq, i;

	max_irq = 1;
	if (bucket->flags & IBF_PCI)
		max_irq = MAX_IRQ_DESC_ACTION;
	for (i = 0; i < max_irq; i++) {
		struct irqaction *p = &desc->action[i];
		u32 mask = (1 << i);

		if (desc->action_active_mask & mask)
			continue;

		desc->action_active_mask |= mask;
		return p;
	}
	return NULL;
}

int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if (unlikely(!handler))
		return -EINVAL;

	if (unlikely(!bucket->irq_info))
		return -ENODEV;

	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
		/*
		 * This function might sleep, so we want to call it first,
		 * outside of the atomic block.  In the SA_STATIC_ALLOC case,
		 * the random driver's kmalloc will fail, but it is safe.
		 * If already initialized, the random driver will not reinit.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is that really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	if (check_irq_sharing(bucket->pil, irqflags)) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -EBUSY;
	}

	action = get_action_slot(bucket);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}

	bucket->flags |= IBF_ACTIVE;
	pending = 0;
	if (bucket != &pil0_dummy_bucket) {
		pending = bucket->pending;
		if (pending)
			bucket->pending = 0;
	}

	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	append_irq_action(bucket->pil, action);

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if (pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << bucket->pil);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);

	if (bucket != &pil0_dummy_bucket)
		register_irq_proc(__irq_ino(irq));

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}

EXPORT_SYMBOL(request_irq);
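/* Driver-side usage sketch (hypothetical device, not from this file);
 * the dev_id cookie later passed to free_irq() selects which shared
 * action to unlink:
 *
 *	if (request_irq(dev->irq, my_intr, SA_SHIRQ, "mydev", dev))
 *		goto err_out;
 *	...
 *	free_irq(dev->irq, dev);
 */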

static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
{
	struct ino_bucket *bucket = __bucket(irq);
	struct irqaction *action, **pp;

	pp = irq_action + bucket->pil;
	action = *pp;
	if (unlikely(!action))
		return NULL;

	if (unlikely(!action->handler)) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		return NULL;
	}

	while (action && action->dev_id != dev_id) {
		pp = &action->next;
		action = *pp;
	}

	if (likely(action))
		*pp = action->next;

	return action;
}

void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket;
	unsigned long flags;

	spin_lock_irqsave(&irq_action_lock, flags);

	action = unlink_irq_action(irq, dev_id);

	spin_unlock_irqrestore(&irq_action_lock, flags);

	if (unlikely(!action))
		return;

	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	bucket = __bucket(irq);
	if (bucket != &pil0_dummy_bucket) {
		struct irq_desc *desc = bucket->irq_info;
		int ent, i;

		for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
			struct irqaction *p = &desc->action[i];

			if (p == action) {
				desc->action_active_mask &= ~(1 << i);
				break;
			}
		}

		if (!desc->action_active_mask) {
			unsigned long imap = bucket->imap;

			/* This unique interrupt source is now inactive. */
			bucket->flags &= ~IBF_ACTIVE;

			/* See if any other buckets share this bucket's IMAP
			 * and are still active.
			 */
			for (ent = 0; ent < NUM_IVECS; ent++) {
				struct ino_bucket *bp = &ivector_table[ent];
				if (bp != bucket &&
				    bp->imap == imap &&
				    (bp->flags & IBF_ACTIVE) != 0)
					break;
			}

			/* Only disable when no other sub-irq levels of
			 * the same IMAP are active.
			 */
			if (ent == NUM_IVECS)
				disable_irq(irq);
		}
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);
}

EXPORT_SYMBOL(free_irq);

#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);

#if 0
	/* The following is how I wish I could implement this.
	 * Unfortunately the ICLR registers are read-only, you can
	 * only write ICLR_foo values to them.  To get the current
	 * IRQ status you would need to get at the IRQ diag registers
	 * in the PCI/SBUS controller, and the layout of those varies
	 * from one controller to the next, sigh... -DaveM
	 */
	unsigned long iclr = bucket->iclr;

	while (1) {
		u32 tmp = upa_readl(iclr);

		if (tmp == ICLR_TRANSMIT ||
		    tmp == ICLR_PENDING) {
			cpu_relax();
			continue;
		}
		break;
	}
#else
	/* So we have to do this with an INPROGRESS bit just like x86. */
	while (bucket->flags & IBF_INPROGRESS)
		cpu_relax();
#endif
}
#endif /* CONFIG_SMP */

static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
{
	struct irq_desc *desc = bp->irq_info;
	unsigned char flags = bp->flags;
	u32 action_mask, i;
	int random;

	bp->flags |= IBF_INPROGRESS;

	if (unlikely(!(flags & IBF_ACTIVE))) {
		bp->pending = 1;
		goto out;
	}

	if (desc->pre_handler)
		desc->pre_handler(bp,
				  desc->pre_handler_arg1,
				  desc->pre_handler_arg2);

	action_mask = desc->action_active_mask;
	random = 0;
	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
		struct irqaction *p = &desc->action[i];
		u32 mask = (1 << i);

		if (!(action_mask & mask))
			continue;

		action_mask &= ~mask;

		if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED)
			random |= p->flags;

		if (!action_mask)
			break;
	}
	if (bp->pil != 0) {
		if (tlb_type == hypervisor) {
			unsigned int ino = __irq_ino(bp);
			int err;

			err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
			if (err != HV_EOK)
				printk("sun4v_intr_setstate(%x): "
				       "err(%d)\n", ino, err);
		} else {
			upa_writel(ICLR_IDLE, bp->iclr);
		}

		/* Test and add entropy */
		if (random & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
	}
out:
	bp->flags &= ~IBF_INPROGRESS;
}

void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp;
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	/*
	 * Check for TICK_INT on level 14 softint.
	 */
	{
		unsigned long clr_mask = 1 << irq;
		unsigned long tick_mask = tick_ops->softint_mask;

		if ((irq == 14) && (get_softint() & tick_mask)) {
			irq = 0;
			clr_mask = tick_mask;
		}
		clear_softint(clr_mask);
	}
#else
	clear_softint(1 << irq);
#endif

	irq_enter();
	kstat_this_cpu.irqs[irq]++;

	/* Sliiiick... */
#ifndef CONFIG_SMP
	bp = ((irq != 0) ?
	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
	      &pil0_dummy_bucket);
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	while (bp) {
		struct ino_bucket *nbp = __bucket(bp->irq_chain);

		bp->irq_chain = 0;
		process_bucket(irq, bp, regs);
		bp = nbp;
	}
	irq_exit();
}

#ifdef CONFIG_BLK_DEV_FD
extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);

/* XXX No easy way to include asm/floppy.h XXX */
extern unsigned char *pdma_vaddr;
extern unsigned long pdma_size;
extern volatile int doing_pdma;
extern unsigned long fdc_status;

irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	if (likely(doing_pdma)) {
		void __iomem *stat = (void __iomem *) fdc_status;
		unsigned char *vaddr = pdma_vaddr;
		unsigned long size = pdma_size;
		u8 val;

		while (size) {
			val = readb(stat);
			if (unlikely(!(val & 0x80))) {
				pdma_vaddr = vaddr;
				pdma_size = size;
				return IRQ_HANDLED;
			}
			if (unlikely(!(val & 0x20))) {
				pdma_vaddr = vaddr;
				pdma_size = size;
				doing_pdma = 0;
				goto main_interrupt;
			}
			if (val & 0x40) {
				/* read */
				*vaddr++ = readb(stat + 1);
			} else {
				unsigned char data = *vaddr++;

				/* write */
				writeb(data, stat + 1);
			}
			size--;
		}

		pdma_vaddr = vaddr;
		pdma_size = size;

		/* Send Terminal Count pulse to floppy controller. */
		val = readb(auxio_register);
		val |= AUXIO_AUX1_FTCNT;
		writeb(val, auxio_register);
		val &= ~AUXIO_AUX1_FTCNT;
		writeb(val, auxio_register);

		doing_pdma = 0;
	}

main_interrupt:
	return floppy_interrupt(irq, dev_cookie, regs);
}
EXPORT_SYMBOL(sparc_floppy_irq);
#endif

/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long mask)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_off);

#ifdef CONFIG_SMP
static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;

	while (!cpu_online(goal_cpu)) {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	}

	if (tlb_type == hypervisor) {
		unsigned int ino = __irq_ino(bucket);

		sun4v_intr_settarget(ino, goal_cpu);
		sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	} else {
		unsigned long imap = bucket->imap;
		unsigned int tid;

		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			tid = goal_cpu << 26;
			tid &= IMAP_AID_SAFARI;
		} else if (this_is_starfire == 0) {
			tid = goal_cpu << 26;
			tid &= IMAP_TID_UPA;
		} else {
			tid = (starfire_translate(imap, goal_cpu) << 26);
			tid &= IMAP_TID_UPA;
		}
		upa_writel(tid | IMAP_VALID, imap);
	}

	do {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	} while (!cpu_online(goal_cpu));

	return goal_cpu;
}

/* Called from request_irq. */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu, level;

	spin_lock_irqsave(&irq_action_lock, flags);
	cpu = 0;

	/*
	 * Skip the timer at [0], and very rare error/power intrs at [15].
	 * Also skip level [12]; it causes problems on Ex000 systems.
	 */
	for (level = 1; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];

		if (level == 12)
			continue;

		while (p) {
			cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}
	spin_unlock_irqrestore(&irq_action_lock, flags);
}
#endif

struct sun5_timer {
	u64 count0;
	u64 limit0;
	u64 count1;
	u64 limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	unsigned int addr[3];
	int tnode, err;

	/* PROM timer node hangs out in the top level of device siblings... */
	tnode = prom_finddevice("/counter-timer");

	/* Assume that if the node is not present, the PROM uses a
	 * different tick mechanism which we need not care about.
	 */
	if (tnode == 0 || tnode == -1) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If the PROM is really using this, it must already be mapped. */
	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
	if (err == -1) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as on sun4c/sun4m, the PROM uses a timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	memset(__irq_work + cpu, 0, sizeof(struct irq_work_struct));
}

static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
{
	unsigned long num_entries = 128;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
}

static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
{
	void *page;

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(page);
}

static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem)
{
	void *page;

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
		prom_halt();
	}

	*pa_ptr = __pa(page);
}

static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
{
#ifdef CONFIG_SMP
	void *page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate and register the mondo and error queues for this cpu. */
void __cpuinit sun4v_init_mondo_queues(int use_bootmem)
{
	int cpu = hard_smp_processor_id();
	struct trap_per_cpu *tb = &trap_block[cpu];

	alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
	alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
	alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
	alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
	alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
	alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);

	init_cpu_send_mondo_info(tb, use_bootmem);

	sun4v_register_mondo_queues(cpu);
}

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	map_prom_timers();
	kill_prom_timer();
	memset(&ivector_table[0], 0, sizeof(ivector_table));

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues(1);

	/* We need to clear any IRQs pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that the ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NUM_IVECS];

#ifdef CONFIG_SMP

static int irq_affinity_read_proc (char *page, char **start, off_t off,
				   int count, int *eof, void *data)
{
	struct ino_bucket *bp = ivector_table + (long)data;
	struct irq_desc *desc = bp->irq_info;
	struct irqaction *ap = desc->action;
	cpumask_t mask;
	int len;

	mask = get_smpaff_in_irqaction(ap);
	if (cpus_empty(mask))
		mask = cpu_online_map;

	len = cpumask_scnprintf(page, count, mask);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
{
	struct ino_bucket *bp = ivector_table + irq;
	struct irq_desc *desc = bp->irq_info;
	struct irqaction *ap = desc->action;

	/* Users specify affinity in terms of hw cpu ids.
	 * As soon as we do this, handler_irq() might see and take action.
	 */
	put_smpaff_in_irqaction(ap, hw_aff);

	/* Migration is simply done by the next cpu to service this
	 * interrupt.
	 */
}

static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
				    unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(new_value, new_value, cpu_online_map);
	if (cpus_empty(new_value))
		return -EINVAL;

	set_intr_affinity(irq, new_value);

	return full_count;
}

#endif

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%x", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	/* XXX SMP affinity not supported on starfire yet. */
	if (this_is_starfire == 0) {
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}

void init_irq_proc (void)
{
	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
}