blob: 463d2c4d9441f9592ce6abf85d27b1e8c38694af [file] [log] [blame]
/*
 * bcsr.c -- Db1xxx/Pb1xxx Devboard CPLD registers ("BCSR") abstraction.
 *
 * All Alchemy development boards (except, of course, the weird PB1000)
 * have a few registers in a CPLD with standardised layout; they mostly
 * only differ in base address.
 * All registers are 16 bits wide with 32-bit spacing.
 */
9
Manuel Lauss95a43792009-10-04 14:55:25 +020010#include <linux/interrupt.h>
Manuel Lauss9bdcf332009-10-04 14:55:24 +020011#include <linux/module.h>
12#include <linux/spinlock.h>
David Howellsca4d3e672010-10-07 14:08:54 +010013#include <linux/irq.h>
Manuel Lauss9bdcf332009-10-04 14:55:24 +020014#include <asm/addrspace.h>
15#include <asm/io.h>
16#include <asm/mach-db1x00/bcsr.h>
17
/* Per-register state: the KSEG1 (uncached, directly usable) MMIO address
 * of the 16bit register, plus a spinlock serializing the read-modify-write
 * cycles done by bcsr_read()/bcsr_write()/bcsr_mod().
 */
static struct bcsr_reg {
	void __iomem *raddr;
	spinlock_t lock;
} bcsr_regs[BCSR_CNT];

static void __iomem *bcsr_virt; /* KSEG1 addr of BCSR base (bcsr1 block) */
static int bcsr_csc_base;	/* linux-irq of first cascaded CPLD irq */
25
Manuel Lauss9bdcf332009-10-04 14:55:24 +020026void __init bcsr_init(unsigned long bcsr1_phys, unsigned long bcsr2_phys)
27{
28 int i;
29
30 bcsr1_phys = KSEG1ADDR(CPHYSADDR(bcsr1_phys));
31 bcsr2_phys = KSEG1ADDR(CPHYSADDR(bcsr2_phys));
32
Manuel Lauss95a43792009-10-04 14:55:25 +020033 bcsr_virt = (void __iomem *)bcsr1_phys;
34
Manuel Lauss9bdcf332009-10-04 14:55:24 +020035 for (i = 0; i < BCSR_CNT; i++) {
36 if (i >= BCSR_HEXLEDS)
37 bcsr_regs[i].raddr = (void __iomem *)bcsr2_phys +
38 (0x04 * (i - BCSR_HEXLEDS));
39 else
40 bcsr_regs[i].raddr = (void __iomem *)bcsr1_phys +
41 (0x04 * i);
42
43 spin_lock_init(&bcsr_regs[i].lock);
44 }
45}
46
47unsigned short bcsr_read(enum bcsr_id reg)
48{
49 unsigned short r;
50 unsigned long flags;
51
52 spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
53 r = __raw_readw(bcsr_regs[reg].raddr);
54 spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
55 return r;
56}
57EXPORT_SYMBOL_GPL(bcsr_read);
58
59void bcsr_write(enum bcsr_id reg, unsigned short val)
60{
61 unsigned long flags;
62
63 spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
64 __raw_writew(val, bcsr_regs[reg].raddr);
65 wmb();
66 spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
67}
68EXPORT_SYMBOL_GPL(bcsr_write);
69
70void bcsr_mod(enum bcsr_id reg, unsigned short clr, unsigned short set)
71{
72 unsigned short r;
73 unsigned long flags;
74
75 spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
76 r = __raw_readw(bcsr_regs[reg].raddr);
77 r &= ~clr;
78 r |= set;
79 __raw_writew(r, bcsr_regs[reg].raddr);
80 wmb();
81 spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
82}
83EXPORT_SYMBOL_GPL(bcsr_mod);
Manuel Lauss95a43792009-10-04 14:55:25 +020084
85/*
86 * DB1200/PB1200 CPLD IRQ muxer
87 */
88static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
89{
90 unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
91
Manuel Laussdd0a0282011-08-12 08:28:35 +020092 disable_irq_nosync(irq);
93
Manuel Lauss95a43792009-10-04 14:55:25 +020094 for ( ; bisr; bisr &= bisr - 1)
95 generic_handle_irq(bcsr_csc_base + __ffs(bisr));
Manuel Laussdd0a0282011-08-12 08:28:35 +020096
97 enable_irq(irq);
Manuel Lauss95a43792009-10-04 14:55:25 +020098}
99
100/* NOTE: both the enable and mask bits must be cleared, otherwise the
101 * CPLD generates tons of spurious interrupts (at least on my DB1200).
102 * -- mlau
103 */
Thomas Gleixnerd24c1a22011-03-23 21:08:44 +0000104static void bcsr_irq_mask(struct irq_data *d)
Manuel Lauss95a43792009-10-04 14:55:25 +0200105{
Thomas Gleixnerd24c1a22011-03-23 21:08:44 +0000106 unsigned short v = 1 << (d->irq - bcsr_csc_base);
Manuel Lauss95a43792009-10-04 14:55:25 +0200107 __raw_writew(v, bcsr_virt + BCSR_REG_INTCLR);
108 __raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
109 wmb();
110}
111
Thomas Gleixnerd24c1a22011-03-23 21:08:44 +0000112static void bcsr_irq_maskack(struct irq_data *d)
Manuel Lauss95a43792009-10-04 14:55:25 +0200113{
Thomas Gleixnerd24c1a22011-03-23 21:08:44 +0000114 unsigned short v = 1 << (d->irq - bcsr_csc_base);
Manuel Lauss95a43792009-10-04 14:55:25 +0200115 __raw_writew(v, bcsr_virt + BCSR_REG_INTCLR);
116 __raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
117 __raw_writew(v, bcsr_virt + BCSR_REG_INTSTAT); /* ack */
118 wmb();
119}
120
Thomas Gleixnerd24c1a22011-03-23 21:08:44 +0000121static void bcsr_irq_unmask(struct irq_data *d)
Manuel Lauss95a43792009-10-04 14:55:25 +0200122{
Thomas Gleixnerd24c1a22011-03-23 21:08:44 +0000123 unsigned short v = 1 << (d->irq - bcsr_csc_base);
Manuel Lauss95a43792009-10-04 14:55:25 +0200124 __raw_writew(v, bcsr_virt + BCSR_REG_INTSET);
125 __raw_writew(v, bcsr_virt + BCSR_REG_MASKSET);
126 wmb();
127}
128
/* irq_chip driving the CPLD-cascaded interrupt sources. */
static struct irq_chip bcsr_irq_type = {
	.name = "CPLD",
	.irq_mask = bcsr_irq_mask,
	.irq_mask_ack = bcsr_irq_maskack,
	.irq_unmask = bcsr_irq_unmask,
};
135
136void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq)
137{
138 unsigned int irq;
139
140 /* mask & disable & ack all */
141 __raw_writew(0xffff, bcsr_virt + BCSR_REG_INTCLR);
142 __raw_writew(0xffff, bcsr_virt + BCSR_REG_MASKCLR);
143 __raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSTAT);
144 wmb();
145
146 bcsr_csc_base = csc_start;
147
148 for (irq = csc_start; irq <= csc_end; irq++)
Thomas Gleixnere4ec7982011-03-27 15:19:28 +0200149 irq_set_chip_and_handler_name(irq, &bcsr_irq_type,
150 handle_level_irq, "level");
Manuel Lauss95a43792009-10-04 14:55:25 +0200151
Thomas Gleixnere4ec7982011-03-27 15:19:28 +0200152 irq_set_chained_handler(hook_irq, bcsr_csc_handler);
Manuel Lauss95a43792009-10-04 14:55:25 +0200153}