/*
 * Intel IXP4xx Queue Manager driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <mach/qmgr.h>

#define DEBUG		0

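/*
 * Module-wide state: used_sram_bitmap[] tracks allocation of the queue
 * SRAM in 16-dword pages (4 x 32 bits = 128 pages), while irq_handlers[]
 * and irq_pdevs[] hold the per-queue callbacks that the shared interrupt
 * handler below dispatches for the first HALF_QUEUES queues.
 */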
struct qmgr_regs __iomem *qmgr_regs;
static struct resource *mem_res;
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static void (*irq_handlers[HALF_QUEUES])(void *pdev);
static void *irq_pdevs[HALF_QUEUES];

void qmgr_set_irq(unsigned int queue, int src,
		  void (*handler)(void *pdev), void *pdev)
{
	u32 __iomem *reg = &qmgr_regs->irqsrc[queue / 8]; /* 8 queues / u32 */
	int bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
	unsigned long flags;

	src &= 7;
	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit), reg);
	irq_handlers[queue] = handler;
	irq_pdevs[queue] = pdev;
	spin_unlock_irqrestore(&qmgr_lock, flags);
}
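
/*
 * Illustrative usage only (not part of this driver): a network driver
 * might route a queue's "not empty" event to its RX callback.  The
 * QUEUE_IRQ_SRC_* selector names are assumed to come from <mach/qmgr.h>;
 * rx_handler(), port and RX_QUEUE are hypothetical caller-side names.
 *
 *	qmgr_set_irq(RX_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY, rx_handler, port);
 *	qmgr_enable_irq(RX_QUEUE);
 */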

/*
 * Shared handler for the first queue manager interrupt: read and
 * acknowledge the status of queues 0..HALF_QUEUES-1, then call the
 * registered handler for every queue whose status bit is set.
 */
static irqreturn_t qmgr_irq1(int irq, void *pdev)
{
	int i;
	u32 val = __raw_readl(&qmgr_regs->irqstat[0]);
	__raw_writel(val, &qmgr_regs->irqstat[0]); /* ACK */

	for (i = 0; i < HALF_QUEUES; i++)
		if (val & (1 << i))
			irq_handlers[i](irq_pdevs[i]);

	return val ? IRQ_HANDLED : IRQ_NONE;
}

void qmgr_enable_irq(unsigned int queue)
{
	unsigned long flags;

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[0]) | (1 << queue),
		     &qmgr_regs->irqen[0]);
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

void qmgr_disable_irq(unsigned int queue)
{
	unsigned long flags;

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[0]) & ~(1 << queue),
		     &qmgr_regs->irqen[0]);
	spin_unlock_irqrestore(&qmgr_lock, flags);
}
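
/*
 * Illustrative pattern only: a NAPI-style consumer typically masks the
 * queue interrupt in its callback, drains the queue from softirq context
 * and re-enables the interrupt when done.  rx_irq_cb(), my_poll() and
 * RX_QUEUE below are hypothetical caller-side names.
 *
 *	static void rx_irq_cb(void *pdev)
 *	{
 *		qmgr_disable_irq(RX_QUEUE);
 *		napi_schedule(...);
 *	}
 *
 *	// ...and at the end of my_poll(): qmgr_enable_irq(RX_QUEUE);
 */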

/*
 * Treat mask[0..3] as a single 128-bit value (one bit per 16-dword SRAM
 * page) and shift it left by one page.
 */
static inline void shift_mask(u32 *mask)
{
	mask[3] = mask[3] << 1 | mask[2] >> 31;
	mask[2] = mask[2] << 1 | mask[1] >> 31;
	mask[1] = mask[1] << 1 | mask[0] >> 31;
	mask[0] <<= 1;
}

int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
		       unsigned int nearly_empty_watermark,
		       unsigned int nearly_full_watermark)
{
	u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
	int err;

	if (queue >= HALF_QUEUES)
		return -ERANGE;

	if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
		return -EINVAL;

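	/*
	 * Build the per-queue SRAM configuration word: the buffer size
	 * code goes in bits 24-25, the nearly-empty watermark in bits
	 * 26-28, the nearly-full watermark in bits 29-31, and the SRAM
	 * base address (in 16-dword pages) is ORed in at bit 14 once a
	 * free region has been found.
	 */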
	switch (len) {
	case  16:
		cfg = 0 << 24;
		mask[0] = 0x1;
		break;
	case  32:
		cfg = 1 << 24;
		mask[0] = 0x3;
		break;
	case  64:
		cfg = 2 << 24;
		mask[0] = 0xF;
		break;
	case 128:
		cfg = 3 << 24;
		mask[0] = 0xFF;
		break;
	default:
		return -EINVAL;
	}

	cfg |= nearly_empty_watermark << 26;
	cfg |= nearly_full_watermark << 29;
	len /= 16;		/* in 16-dwords: 1, 2, 4 or 8 */
	mask[1] = mask[2] = mask[3] = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	spin_lock_irq(&qmgr_lock);
	if (__raw_readl(&qmgr_regs->sram[queue])) {
		err = -EBUSY;
		goto err;
	}

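	/*
	 * First-fit search: slide the allocation mask up one 16-dword
	 * page at a time until it no longer overlaps used_sram_bitmap[],
	 * or fail once the window would run past the end of SRAM.
	 */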
	while (1) {
		if (!(used_sram_bitmap[0] & mask[0]) &&
		    !(used_sram_bitmap[1] & mask[1]) &&
		    !(used_sram_bitmap[2] & mask[2]) &&
		    !(used_sram_bitmap[3] & mask[3]))
			break; /* found free space */

		addr++;
		shift_mask(mask);
		if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
			printk(KERN_ERR "qmgr: no free SRAM space for"
			       " queue %i\n", queue);
			err = -ENOMEM;
			goto err;
		}
	}

	used_sram_bitmap[0] |= mask[0];
	used_sram_bitmap[1] |= mask[1];
	used_sram_bitmap[2] |= mask[2];
	used_sram_bitmap[3] |= mask[3];
	__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
	spin_unlock_irq(&qmgr_lock);

#if DEBUG
	printk(KERN_DEBUG "qmgr: requested queue %i, addr = 0x%02X\n",
	       queue, addr);
#endif
	return 0;

err:
	spin_unlock_irq(&qmgr_lock);
	module_put(THIS_MODULE);
	return err;
}
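
/*
 * Illustrative usage only: request SRAM for a 128-dword queue with no
 * watermark interrupts, then release it on teardown.  TX_QUEUE is a
 * hypothetical queue number; qmgr_put_entry()/qmgr_get_entry() are the
 * accessors assumed to be provided by <mach/qmgr.h>.
 *
 *	err = qmgr_request_queue(TX_QUEUE, 128, 0, 0);
 *	if (err)
 *		return err;
 *	...
 *	qmgr_release_queue(TX_QUEUE);
 */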

void qmgr_release_queue(unsigned int queue)
{
	u32 cfg, addr, mask[4];

	BUG_ON(queue >= HALF_QUEUES); /* not in valid range */

	spin_lock_irq(&qmgr_lock);
	cfg = __raw_readl(&qmgr_regs->sram[queue]);
	addr = (cfg >> 14) & 0xFF;

	BUG_ON(!addr);		/* not requested */

	switch ((cfg >> 24) & 3) {
	case 0: mask[0] = 0x1; break;
	case 1: mask[0] = 0x3; break;
	case 2: mask[0] = 0xF; break;
	case 3: mask[0] = 0xFF; break;
	}

	mask[1] = mask[2] = mask[3] = 0;

	while (addr--)
		shift_mask(mask);

	__raw_writel(0, &qmgr_regs->sram[queue]);

	used_sram_bitmap[0] &= ~mask[0];
	used_sram_bitmap[1] &= ~mask[1];
	used_sram_bitmap[2] &= ~mask[2];
	used_sram_bitmap[3] &= ~mask[3];
	irq_handlers[queue] = NULL; /* catch IRQ bugs */
	spin_unlock_irq(&qmgr_lock);

	module_put(THIS_MODULE);
#if DEBUG
	printk(KERN_DEBUG "qmgr: released queue %i\n", queue);
#endif
}
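
/*
 * A typical teardown sequence (caller-side sketch, not mandated by this
 * driver): mask the queue's interrupt, drain any remaining entries, then
 * release the SRAM region.
 *
 *	qmgr_disable_irq(queue);
 *	// drain the queue here
 *	qmgr_release_queue(queue);
 */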

static int qmgr_init(void)
{
	int i, err;

	mem_res = request_mem_region(IXP4XX_QMGR_BASE_PHYS,
				     IXP4XX_QMGR_REGION_SIZE,
				     "IXP4xx Queue Manager");
	if (mem_res == NULL)
		return -EBUSY;

	qmgr_regs = ioremap(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
	if (qmgr_regs == NULL) {
		err = -ENOMEM;
		goto error_map;
	}

	/* reset qmgr registers */
	for (i = 0; i < 4; i++) {
		__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
		__raw_writel(0, &qmgr_regs->irqsrc[i]);
	}
	for (i = 0; i < 2; i++) {
		__raw_writel(0, &qmgr_regs->stat2[i]);
		__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
		__raw_writel(0, &qmgr_regs->irqen[i]);
	}

	for (i = 0; i < QUEUES; i++)
		__raw_writel(0, &qmgr_regs->sram[i]);

	err = request_irq(IRQ_IXP4XX_QM1, qmgr_irq1, 0,
			  "IXP4xx Queue Manager", NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i\n",
		       IRQ_IXP4XX_QM1);
		goto error_irq;
	}

	used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
	spin_lock_init(&qmgr_lock);

	printk(KERN_INFO "IXP4xx Queue Manager initialized.\n");
	return 0;

error_irq:
	iounmap(qmgr_regs);
error_map:
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
	return err;
}
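
/*
 * Note: only IRQ_IXP4XX_QM1 is requested above, so this driver services
 * interrupts for the first HALF_QUEUES queues only.
 */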

static void qmgr_remove(void)
{
	free_irq(IRQ_IXP4XX_QM1, NULL);
	synchronize_irq(IRQ_IXP4XX_QM1);
	iounmap(qmgr_regs);
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
}

module_init(qmgr_init);
module_exit(qmgr_remove);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");

EXPORT_SYMBOL(qmgr_regs);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
EXPORT_SYMBOL(qmgr_request_queue);
EXPORT_SYMBOL(qmgr_release_queue);