/*
 * bcsr.h -- Db1xxx/Pb1xxx Devboard CPLD registers ("BCSR") abstraction.
 *
 * All Alchemy development boards (except, of course, the weird PB1000)
 * have a few registers in a CPLD with standardised layout; they mostly
 * differ only in base address.
 * All registers are 16 bits wide with 32-bit spacing.
 */

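/*
 * Typical use from board setup code, as a sketch only (the arguments
 * below are illustrative placeholders, not values defined in this file):
 *
 *	bcsr_init(board_bcsr1_phys, board_bcsr2_phys);
 *	val = bcsr_read(some_bcsr_id);
 *	bcsr_mod(some_bcsr_id, bits_to_clear, bits_to_set);
 *	bcsr_init_irq(first_cascade_irq, last_cascade_irq, cpu_irq);
 */
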
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/mach-db1x00/bcsr.h>

static struct bcsr_reg {
	void __iomem	*raddr;
	spinlock_t	lock;
} bcsr_regs[BCSR_CNT];

static void __iomem *bcsr_virt;	/* KSEG1 addr of BCSR base */
static int bcsr_csc_base;	/* linux-irq of first cascaded irq */

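/*
 * bcsr_init - record the uncached (KSEG1) addresses of both BCSR register
 * blocks and initialize the per-register locks.  Registers below
 * BCSR_HEXLEDS live in the first block, the remaining ones in the second;
 * within a block the registers are 4 bytes apart.
 */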
void __init bcsr_init(unsigned long bcsr1_phys, unsigned long bcsr2_phys)
{
	int i;

	bcsr1_phys = KSEG1ADDR(CPHYSADDR(bcsr1_phys));
	bcsr2_phys = KSEG1ADDR(CPHYSADDR(bcsr2_phys));

	bcsr_virt = (void __iomem *)bcsr1_phys;

	for (i = 0; i < BCSR_CNT; i++) {
		if (i >= BCSR_HEXLEDS)
			bcsr_regs[i].raddr = (void __iomem *)bcsr2_phys +
					(0x04 * (i - BCSR_HEXLEDS));
		else
			bcsr_regs[i].raddr = (void __iomem *)bcsr1_phys +
					(0x04 * i);

		spin_lock_init(&bcsr_regs[i].lock);
	}
}

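/* bcsr_read - 16-bit read of a BCSR register, under its lock. */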
unsigned short bcsr_read(enum bcsr_id reg)
{
	unsigned short r;
	unsigned long flags;

	spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
	r = __raw_readw(bcsr_regs[reg].raddr);
	spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
	return r;
}
EXPORT_SYMBOL_GPL(bcsr_read);

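/* bcsr_write - 16-bit write to a BCSR register, under its lock. */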
void bcsr_write(enum bcsr_id reg, unsigned short val)
{
	unsigned long flags;

	spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
	__raw_writew(val, bcsr_regs[reg].raddr);
	wmb();
	spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
}
EXPORT_SYMBOL_GPL(bcsr_write);

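/*
 * bcsr_mod - read-modify-write of a BCSR register: clears the bits in
 * @clr, then sets the bits in @set, all under the register's lock.
 */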
void bcsr_mod(enum bcsr_id reg, unsigned short clr, unsigned short set)
{
	unsigned short r;
	unsigned long flags;

	spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
	r = __raw_readw(bcsr_regs[reg].raddr);
	r &= ~clr;
	r |= set;
	__raw_writew(r, bcsr_regs[reg].raddr);
	wmb();
	spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
}
EXPORT_SYMBOL_GPL(bcsr_mod);

/*
 * DB1200/PB1200 CPLD IRQ muxer
 */
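/*
 * Chained handler for the cascaded CPLD interrupts: reads the interrupt
 * status register and dispatches every pending bit to its linux irq
 * (bcsr_csc_base + bit number).
 */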
static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
{
	unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);

	for ( ; bisr; bisr &= bisr - 1)
		generic_handle_irq(bcsr_csc_base + __ffs(bisr));
}

/* NOTE: both the enable and mask bits must be cleared, otherwise the
 * CPLD generates tons of spurious interrupts (at least on my DB1200).
 *	-- mlau
 */
static void bcsr_irq_mask(struct irq_data *d)
{
	unsigned short v = 1 << (d->irq - bcsr_csc_base);
	__raw_writew(v, bcsr_virt + BCSR_REG_INTCLR);
	__raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
	wmb();
}

static void bcsr_irq_maskack(struct irq_data *d)
{
	unsigned short v = 1 << (d->irq - bcsr_csc_base);
	__raw_writew(v, bcsr_virt + BCSR_REG_INTCLR);
	__raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
	__raw_writew(v, bcsr_virt + BCSR_REG_INTSTAT);	/* ack */
	wmb();
}

static void bcsr_irq_unmask(struct irq_data *d)
{
	unsigned short v = 1 << (d->irq - bcsr_csc_base);
	__raw_writew(v, bcsr_virt + BCSR_REG_INTSET);
	__raw_writew(v, bcsr_virt + BCSR_REG_MASKSET);
	wmb();
}

static struct irq_chip bcsr_irq_type = {
	.name		= "CPLD",
	.irq_mask	= bcsr_irq_mask,
	.irq_mask_ack	= bcsr_irq_maskack,
	.irq_unmask	= bcsr_irq_unmask,
};

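/*
 * bcsr_init_irq - mask, disable and ack all CPLD interrupt sources,
 * register the linux irqs csc_start..csc_end as level-type CPLD irqs,
 * and chain the demultiplexer onto @hook_irq.
 */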
void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq)
{
	unsigned int irq;

	/* mask & disable & ack all */
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_INTCLR);
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_MASKCLR);
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSTAT);
	wmb();

	bcsr_csc_base = csc_start;

	for (irq = csc_start; irq <= csc_end; irq++)
		set_irq_chip_and_handler_name(irq, &bcsr_irq_type,
					      handle_level_irq, "level");

	set_irq_chained_handler(hook_irq, bcsr_csc_handler);
}