blob: f2039ef2c293615470481733747b7e198d797d75 [file] [log] [blame]
/*
 * bcsr.c -- Db1xxx/Pb1xxx Devboard CPLD registers ("BCSR") abstraction.
 *
 * All Alchemy development boards (except, of course, the weird PB1000)
 * have a few registers in a CPLD with standardised layout; they mostly
 * only differ in base address.
 * All registers are 16bits wide with 32bit spacing.
 */
9
Manuel Lauss95a43792009-10-04 14:55:25 +020010#include <linux/interrupt.h>
Manuel Lauss9bdcf332009-10-04 14:55:24 +020011#include <linux/module.h>
12#include <linux/spinlock.h>
David Howellsca4d3e672010-10-07 14:08:54 +010013#include <linux/irq.h>
Manuel Lauss9bdcf332009-10-04 14:55:24 +020014#include <asm/addrspace.h>
15#include <asm/io.h>
16#include <asm/mach-db1x00/bcsr.h>
17
/* Per-register bookkeeping: mapped address plus a lock serializing the
 * read-modify-write cycles done in bcsr_mod() against plain reads/writes.
 */
static struct bcsr_reg {
	void __iomem *raddr;	/* mapped (KSEG1) address of this register */
	spinlock_t lock;	/* protects accesses to this register */
} bcsr_regs[BCSR_CNT];

static void __iomem *bcsr_virt;	/* KSEG1 addr of BCSR base */
static int bcsr_csc_base;	/* linux-irq of first cascaded irq */
25
Manuel Lauss9bdcf332009-10-04 14:55:24 +020026void __init bcsr_init(unsigned long bcsr1_phys, unsigned long bcsr2_phys)
27{
28 int i;
29
30 bcsr1_phys = KSEG1ADDR(CPHYSADDR(bcsr1_phys));
31 bcsr2_phys = KSEG1ADDR(CPHYSADDR(bcsr2_phys));
32
Manuel Lauss95a43792009-10-04 14:55:25 +020033 bcsr_virt = (void __iomem *)bcsr1_phys;
34
Manuel Lauss9bdcf332009-10-04 14:55:24 +020035 for (i = 0; i < BCSR_CNT; i++) {
36 if (i >= BCSR_HEXLEDS)
37 bcsr_regs[i].raddr = (void __iomem *)bcsr2_phys +
38 (0x04 * (i - BCSR_HEXLEDS));
39 else
40 bcsr_regs[i].raddr = (void __iomem *)bcsr1_phys +
41 (0x04 * i);
42
43 spin_lock_init(&bcsr_regs[i].lock);
44 }
45}
46
47unsigned short bcsr_read(enum bcsr_id reg)
48{
49 unsigned short r;
50 unsigned long flags;
51
52 spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
53 r = __raw_readw(bcsr_regs[reg].raddr);
54 spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
55 return r;
56}
57EXPORT_SYMBOL_GPL(bcsr_read);
58
59void bcsr_write(enum bcsr_id reg, unsigned short val)
60{
61 unsigned long flags;
62
63 spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
64 __raw_writew(val, bcsr_regs[reg].raddr);
65 wmb();
66 spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
67}
68EXPORT_SYMBOL_GPL(bcsr_write);
69
70void bcsr_mod(enum bcsr_id reg, unsigned short clr, unsigned short set)
71{
72 unsigned short r;
73 unsigned long flags;
74
75 spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
76 r = __raw_readw(bcsr_regs[reg].raddr);
77 r &= ~clr;
78 r |= set;
79 __raw_writew(r, bcsr_regs[reg].raddr);
80 wmb();
81 spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
82}
83EXPORT_SYMBOL_GPL(bcsr_mod);
Manuel Lauss95a43792009-10-04 14:55:25 +020084
85/*
86 * DB1200/PB1200 CPLD IRQ muxer
87 */
88static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
89{
90 unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
91
Manuel Laussdd0a0282011-08-12 08:28:35 +020092 disable_irq_nosync(irq);
Manuel Lauss6c2be5c2012-01-21 18:13:15 +010093 generic_handle_irq(bcsr_csc_base + __ffs(bisr));
Manuel Laussdd0a0282011-08-12 08:28:35 +020094 enable_irq(irq);
Manuel Lauss95a43792009-10-04 14:55:25 +020095}
96
Thomas Gleixnerd24c1a22011-03-23 21:08:44 +000097static void bcsr_irq_mask(struct irq_data *d)
Manuel Lauss95a43792009-10-04 14:55:25 +020098{
Thomas Gleixnerd24c1a22011-03-23 21:08:44 +000099 unsigned short v = 1 << (d->irq - bcsr_csc_base);
Manuel Lauss95a43792009-10-04 14:55:25 +0200100 __raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
101 wmb();
102}
103
/* Mask a cascaded source and acknowledge its pending status.
 * Masks first (MASKCLR), then clears the status bit (INTSTAT); the
 * wmb() pushes both writes out to the CPLD before returning.
 */
static void bcsr_irq_maskack(struct irq_data *d)
{
	unsigned short v = 1 << (d->irq - bcsr_csc_base);
	__raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
	__raw_writew(v, bcsr_virt + BCSR_REG_INTSTAT);	/* ack */
	wmb();
}
111
Thomas Gleixnerd24c1a22011-03-23 21:08:44 +0000112static void bcsr_irq_unmask(struct irq_data *d)
Manuel Lauss95a43792009-10-04 14:55:25 +0200113{
Thomas Gleixnerd24c1a22011-03-23 21:08:44 +0000114 unsigned short v = 1 << (d->irq - bcsr_csc_base);
Manuel Lauss95a43792009-10-04 14:55:25 +0200115 __raw_writew(v, bcsr_virt + BCSR_REG_MASKSET);
116 wmb();
117}
118
/* irq_chip backing the cascaded CPLD interrupt lines */
static struct irq_chip bcsr_irq_type = {
	.name		= "CPLD",
	.irq_mask	= bcsr_irq_mask,
	.irq_mask_ack	= bcsr_irq_maskack,
	.irq_unmask	= bcsr_irq_unmask,
};
125
/*
 * bcsr_init_irq - wire up the CPLD interrupt cascade.
 * @csc_start: first linux irq number backed by the CPLD mux.
 * @csc_end:   last linux irq number backed by the CPLD mux (inclusive).
 * @hook_irq:  cpu irq line the CPLD cascade output is hooked to.
 */
void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq)
{
	unsigned int irq;

	/* mask & enable & ack all */
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_MASKCLR);
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSET);
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSTAT);
	wmb();

	/* remember the base so the chip callbacks can map irq <-> bit */
	bcsr_csc_base = csc_start;

	for (irq = csc_start; irq <= csc_end; irq++)
		irq_set_chip_and_handler_name(irq, &bcsr_irq_type,
					      handle_level_irq, "level");

	irq_set_chained_handler(hook_irq, bcsr_csc_handler);
}
Manuel Lauss95a43792009-10-04 14:55:25 +0200143}