/*
 * bcsr.c -- Db1xxx/Pb1xxx Devboard CPLD registers ("BCSR") abstraction.
 *
 * All Alchemy development boards (except, of course, the weird PB1000)
 * have a few registers in a CPLD with standardised layout; they mostly
 * only differ in base address.
 * All registers are 16bits wide with 32bit spacing.
 */
| 9 | |
Manuel Lauss | 95a4379 | 2009-10-04 14:55:25 +0200 | [diff] [blame] | 10 | #include <linux/interrupt.h> |
Manuel Lauss | 9bdcf33 | 2009-10-04 14:55:24 +0200 | [diff] [blame] | 11 | #include <linux/module.h> |
| 12 | #include <linux/spinlock.h> |
David Howells | ca4d3e67 | 2010-10-07 14:08:54 +0100 | [diff] [blame] | 13 | #include <linux/irq.h> |
Manuel Lauss | 9bdcf33 | 2009-10-04 14:55:24 +0200 | [diff] [blame] | 14 | #include <asm/addrspace.h> |
| 15 | #include <asm/io.h> |
| 16 | #include <asm/mach-db1x00/bcsr.h> |
| 17 | |
/*
 * Per-register state: the ioremapped (KSEG1) address of the register
 * plus a spinlock serializing accesses to it (bcsr_mod() does a
 * read-modify-write, so plain readers/writers must be excluded too).
 */
static struct bcsr_reg {
	void __iomem *raddr;
	spinlock_t lock;
} bcsr_regs[BCSR_CNT];

static void __iomem *bcsr_virt; /* KSEG1 addr of BCSR base */
static int bcsr_csc_base;	/* linux-irq of first cascaded irq */
| 25 | |
Manuel Lauss | 9bdcf33 | 2009-10-04 14:55:24 +0200 | [diff] [blame] | 26 | void __init bcsr_init(unsigned long bcsr1_phys, unsigned long bcsr2_phys) |
| 27 | { |
| 28 | int i; |
| 29 | |
| 30 | bcsr1_phys = KSEG1ADDR(CPHYSADDR(bcsr1_phys)); |
| 31 | bcsr2_phys = KSEG1ADDR(CPHYSADDR(bcsr2_phys)); |
| 32 | |
Manuel Lauss | 95a4379 | 2009-10-04 14:55:25 +0200 | [diff] [blame] | 33 | bcsr_virt = (void __iomem *)bcsr1_phys; |
| 34 | |
Manuel Lauss | 9bdcf33 | 2009-10-04 14:55:24 +0200 | [diff] [blame] | 35 | for (i = 0; i < BCSR_CNT; i++) { |
| 36 | if (i >= BCSR_HEXLEDS) |
| 37 | bcsr_regs[i].raddr = (void __iomem *)bcsr2_phys + |
| 38 | (0x04 * (i - BCSR_HEXLEDS)); |
| 39 | else |
| 40 | bcsr_regs[i].raddr = (void __iomem *)bcsr1_phys + |
| 41 | (0x04 * i); |
| 42 | |
| 43 | spin_lock_init(&bcsr_regs[i].lock); |
| 44 | } |
| 45 | } |
| 46 | |
| 47 | unsigned short bcsr_read(enum bcsr_id reg) |
| 48 | { |
| 49 | unsigned short r; |
| 50 | unsigned long flags; |
| 51 | |
| 52 | spin_lock_irqsave(&bcsr_regs[reg].lock, flags); |
| 53 | r = __raw_readw(bcsr_regs[reg].raddr); |
| 54 | spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags); |
| 55 | return r; |
| 56 | } |
| 57 | EXPORT_SYMBOL_GPL(bcsr_read); |
| 58 | |
| 59 | void bcsr_write(enum bcsr_id reg, unsigned short val) |
| 60 | { |
| 61 | unsigned long flags; |
| 62 | |
| 63 | spin_lock_irqsave(&bcsr_regs[reg].lock, flags); |
| 64 | __raw_writew(val, bcsr_regs[reg].raddr); |
| 65 | wmb(); |
| 66 | spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags); |
| 67 | } |
| 68 | EXPORT_SYMBOL_GPL(bcsr_write); |
| 69 | |
| 70 | void bcsr_mod(enum bcsr_id reg, unsigned short clr, unsigned short set) |
| 71 | { |
| 72 | unsigned short r; |
| 73 | unsigned long flags; |
| 74 | |
| 75 | spin_lock_irqsave(&bcsr_regs[reg].lock, flags); |
| 76 | r = __raw_readw(bcsr_regs[reg].raddr); |
| 77 | r &= ~clr; |
| 78 | r |= set; |
| 79 | __raw_writew(r, bcsr_regs[reg].raddr); |
| 80 | wmb(); |
| 81 | spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags); |
| 82 | } |
| 83 | EXPORT_SYMBOL_GPL(bcsr_mod); |
Manuel Lauss | 95a4379 | 2009-10-04 14:55:25 +0200 | [diff] [blame] | 84 | |
| 85 | /* |
| 86 | * DB1200/PB1200 CPLD IRQ muxer |
| 87 | */ |
| 88 | static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d) |
| 89 | { |
| 90 | unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT); |
| 91 | |
Manuel Lauss | dd0a028 | 2011-08-12 08:28:35 +0200 | [diff] [blame] | 92 | disable_irq_nosync(irq); |
| 93 | |
Manuel Lauss | 95a4379 | 2009-10-04 14:55:25 +0200 | [diff] [blame] | 94 | for ( ; bisr; bisr &= bisr - 1) |
| 95 | generic_handle_irq(bcsr_csc_base + __ffs(bisr)); |
Manuel Lauss | dd0a028 | 2011-08-12 08:28:35 +0200 | [diff] [blame] | 96 | |
| 97 | enable_irq(irq); |
Manuel Lauss | 95a4379 | 2009-10-04 14:55:25 +0200 | [diff] [blame] | 98 | } |
| 99 | |
| 100 | /* NOTE: both the enable and mask bits must be cleared, otherwise the |
| 101 | * CPLD generates tons of spurious interrupts (at least on my DB1200). |
| 102 | * -- mlau |
| 103 | */ |
Thomas Gleixner | d24c1a2 | 2011-03-23 21:08:44 +0000 | [diff] [blame] | 104 | static void bcsr_irq_mask(struct irq_data *d) |
Manuel Lauss | 95a4379 | 2009-10-04 14:55:25 +0200 | [diff] [blame] | 105 | { |
Thomas Gleixner | d24c1a2 | 2011-03-23 21:08:44 +0000 | [diff] [blame] | 106 | unsigned short v = 1 << (d->irq - bcsr_csc_base); |
Manuel Lauss | 95a4379 | 2009-10-04 14:55:25 +0200 | [diff] [blame] | 107 | __raw_writew(v, bcsr_virt + BCSR_REG_INTCLR); |
| 108 | __raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR); |
| 109 | wmb(); |
| 110 | } |
| 111 | |
Thomas Gleixner | d24c1a2 | 2011-03-23 21:08:44 +0000 | [diff] [blame] | 112 | static void bcsr_irq_maskack(struct irq_data *d) |
Manuel Lauss | 95a4379 | 2009-10-04 14:55:25 +0200 | [diff] [blame] | 113 | { |
Thomas Gleixner | d24c1a2 | 2011-03-23 21:08:44 +0000 | [diff] [blame] | 114 | unsigned short v = 1 << (d->irq - bcsr_csc_base); |
Manuel Lauss | 95a4379 | 2009-10-04 14:55:25 +0200 | [diff] [blame] | 115 | __raw_writew(v, bcsr_virt + BCSR_REG_INTCLR); |
| 116 | __raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR); |
| 117 | __raw_writew(v, bcsr_virt + BCSR_REG_INTSTAT); /* ack */ |
| 118 | wmb(); |
| 119 | } |
| 120 | |
Thomas Gleixner | d24c1a2 | 2011-03-23 21:08:44 +0000 | [diff] [blame] | 121 | static void bcsr_irq_unmask(struct irq_data *d) |
Manuel Lauss | 95a4379 | 2009-10-04 14:55:25 +0200 | [diff] [blame] | 122 | { |
Thomas Gleixner | d24c1a2 | 2011-03-23 21:08:44 +0000 | [diff] [blame] | 123 | unsigned short v = 1 << (d->irq - bcsr_csc_base); |
Manuel Lauss | 95a4379 | 2009-10-04 14:55:25 +0200 | [diff] [blame] | 124 | __raw_writew(v, bcsr_virt + BCSR_REG_INTSET); |
| 125 | __raw_writew(v, bcsr_virt + BCSR_REG_MASKSET); |
| 126 | wmb(); |
| 127 | } |
| 128 | |
/* irq_chip for the cascaded CPLD irqs (set up in bcsr_init_irq below). */
static struct irq_chip bcsr_irq_type = {
	.name = "CPLD",
	.irq_mask = bcsr_irq_mask,
	.irq_mask_ack = bcsr_irq_maskack,
	.irq_unmask = bcsr_irq_unmask,
};
| 135 | |
| 136 | void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq) |
| 137 | { |
| 138 | unsigned int irq; |
| 139 | |
| 140 | /* mask & disable & ack all */ |
| 141 | __raw_writew(0xffff, bcsr_virt + BCSR_REG_INTCLR); |
| 142 | __raw_writew(0xffff, bcsr_virt + BCSR_REG_MASKCLR); |
| 143 | __raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSTAT); |
| 144 | wmb(); |
| 145 | |
| 146 | bcsr_csc_base = csc_start; |
| 147 | |
| 148 | for (irq = csc_start; irq <= csc_end; irq++) |
Thomas Gleixner | e4ec798 | 2011-03-27 15:19:28 +0200 | [diff] [blame] | 149 | irq_set_chip_and_handler_name(irq, &bcsr_irq_type, |
| 150 | handle_level_irq, "level"); |
Manuel Lauss | 95a4379 | 2009-10-04 14:55:25 +0200 | [diff] [blame] | 151 | |
Thomas Gleixner | e4ec798 | 2011-03-27 15:19:28 +0200 | [diff] [blame] | 152 | irq_set_chained_handler(hook_irq, bcsr_csc_handler); |
Manuel Lauss | 95a4379 | 2009-10-04 14:55:25 +0200 | [diff] [blame] | 153 | } |