/*
 *   This program is free software; you can redistribute it and/or modify it
 *   under the terms of the GNU General Public License version 2 as published
 *   by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 *   Copyright (C) 2011 John Crispin <blogic@openwrt.org>
 */
|  | 17 |  | 
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>

#include <lantiq_soc.h>
#include <xway_dma.h>
|  | 25 |  | 
/* register offsets, relative to ltq_dma_membase */
#define LTQ_DMA_CTRL		0x10	/* global control (DMA_RESET lives here) */
#define LTQ_DMA_CPOLL		0x14	/* channel polling setup */
#define LTQ_DMA_CS		0x18	/* channel select window for the C* regs */
#define LTQ_DMA_CCTRL		0x1C	/* channel control (of selected channel) */
#define LTQ_DMA_CDBA		0x20	/* channel descriptor base address */
#define LTQ_DMA_CDLEN		0x24	/* channel descriptor ring length */
#define LTQ_DMA_CIS		0x28	/* channel interrupt status */
#define LTQ_DMA_CIE		0x2C	/* channel interrupt enable */
#define LTQ_DMA_PS		0x40	/* port select window for LTQ_DMA_PCTRL */
#define LTQ_DMA_PCTRL		0x44	/* port control (of selected port) */
#define LTQ_DMA_IRNEN		0xf4	/* per-channel interrupt enable mask */

#define DMA_DESCPT		BIT(3)		/* descriptor complete irq */
#define DMA_TX			BIT(8)		/* TX channel direction */
#define DMA_CHAN_ON		BIT(0)		/* channel on / off bit */
#define DMA_PDEN		BIT(6)		/* enable packet drop */
#define DMA_CHAN_RST		BIT(1)		/* channel reset (polled until clear) */
#define DMA_RESET		BIT(0)		/* global engine reset, in LTQ_DMA_CTRL */
#define DMA_IRQ_ACK		0x7e		/* ack value written to LTQ_DMA_CIS */
#define DMA_POLL		BIT(31)		/* turn on channel polling */
#define DMA_CLK_DIV4		BIT(6)		/* polling clock divider */
#define DMA_2W_BURST		BIT(1)		/* 2 word burst length */
#define DMA_MAX_CHANNEL		20		/* the soc has 20 channels */
#define DMA_ETOP_ENDIANESS	(0xf << 8) /* endianess swap etop channels */
#define DMA_WEIGHT	(BIT(17) | BIT(16))	/* default channel weight */

/* MMIO accessors; x/y is the value, the last argument the register offset */
#define ltq_dma_r32(x)			ltq_r32(ltq_dma_membase + (x))
#define ltq_dma_w32(x, y)		ltq_w32(x, ltq_dma_membase + (y))
#define ltq_dma_w32_mask(x, y, z)	ltq_w32_mask(x, y, \
						ltq_dma_membase + (z))
|  | 56 |  | 
/* physical register window of the DMA controller */
static struct resource ltq_dma_resource = {
	.name	= "dma",
	.start	= LTQ_DMA_BASE_ADDR,
	.end	= LTQ_DMA_BASE_ADDR + LTQ_DMA_SIZE - 1,
	.flags  = IORESOURCE_MEM,
};

/* ioremapped base of the register window, set up in ltq_dma_init() */
static void __iomem *ltq_dma_membase;
|  | 65 |  | 
/*
 * ltq_dma_enable_irq - unmask the interrupt of one DMA channel
 * @ch: the channel whose bit is set in LTQ_DMA_IRNEN
 *
 * The controller uses an indirect window: LTQ_DMA_CS selects the active
 * channel before per-channel registers are accessed, so the select/unmask
 * pair must not be interleaved with another channel access -> irqs off.
 */
void
ltq_dma_enable_irq(struct ltq_dma_channel *ch)
{
	unsigned long flags;

	local_irq_save(flags);
	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
|  | 77 |  | 
/*
 * ltq_dma_disable_irq - mask the interrupt of one DMA channel
 * @ch: the channel whose bit is cleared in LTQ_DMA_IRNEN
 *
 * Counterpart to ltq_dma_enable_irq(); same CS-window locking rule.
 */
void
ltq_dma_disable_irq(struct ltq_dma_channel *ch)
{
	unsigned long flags;

	local_irq_save(flags);
	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
|  | 89 |  | 
/*
 * ltq_dma_ack_irq - acknowledge pending interrupts of one DMA channel
 * @ch: the channel to acknowledge
 *
 * Selects the channel via LTQ_DMA_CS and writes the DMA_IRQ_ACK pattern
 * into the channel interrupt status register.
 */
void
ltq_dma_ack_irq(struct ltq_dma_channel *ch)
{
	unsigned long flags;

	local_irq_save(flags);
	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
	ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
|  | 101 |  | 
/*
 * ltq_dma_open - switch a DMA channel on and unmask its interrupt
 * @ch: the channel to open
 *
 * Sets DMA_CHAN_ON in the channel control register of the selected
 * channel, then unmasks its interrupt via ltq_dma_enable_irq() (which
 * nests its own irq-save section; that is harmless).
 */
void
ltq_dma_open(struct ltq_dma_channel *ch)
{
	unsigned long flag;

	local_irq_save(flag);
	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
	ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
	ltq_dma_enable_irq(ch);
	local_irq_restore(flag);
}
EXPORT_SYMBOL_GPL(ltq_dma_open);
|  | 114 |  | 
/*
 * ltq_dma_close - switch a DMA channel off and mask its interrupt
 * @ch: the channel to close
 *
 * Mirror image of ltq_dma_open(): clears DMA_CHAN_ON, then masks the
 * channel interrupt.
 */
void
ltq_dma_close(struct ltq_dma_channel *ch)
{
	unsigned long flag;

	local_irq_save(flag);
	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
	ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
	ltq_dma_disable_irq(ch);
	local_irq_restore(flag);
}
EXPORT_SYMBOL_GPL(ltq_dma_close);
|  | 127 |  | 
|  | 128 | static void | 
|  | 129 | ltq_dma_alloc(struct ltq_dma_channel *ch) | 
|  | 130 | { | 
|  | 131 | unsigned long flags; | 
|  | 132 |  | 
|  | 133 | ch->desc = 0; | 
|  | 134 | ch->desc_base = dma_alloc_coherent(NULL, | 
|  | 135 | LTQ_DESC_NUM * LTQ_DESC_SIZE, | 
|  | 136 | &ch->phys, GFP_ATOMIC); | 
|  | 137 | memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE); | 
|  | 138 |  | 
|  | 139 | local_irq_save(flags); | 
|  | 140 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | 
|  | 141 | ltq_dma_w32(ch->phys, LTQ_DMA_CDBA); | 
|  | 142 | ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN); | 
|  | 143 | ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); | 
|  | 144 | wmb(); | 
|  | 145 | ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL); | 
|  | 146 | while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST) | 
|  | 147 | ; | 
|  | 148 | local_irq_restore(flags); | 
|  | 149 | } | 
|  | 150 |  | 
/*
 * ltq_dma_alloc_tx - set a channel up for the TX direction
 * @ch: the channel to configure
 *
 * Allocates the descriptor ring, enables the descriptor-complete
 * interrupt, unmasks the channel and marks it as TX with the default
 * weight.
 *
 * NOTE(review): the CIE/CCTRL writes assume LTQ_DMA_CS still selects
 * ch->nr from inside ltq_dma_alloc(); irqs are briefly enabled between
 * the two sections, so another channel access could retarget the window
 * — confirm.
 */
void
ltq_dma_alloc_tx(struct ltq_dma_channel *ch)
{
	unsigned long flags;

	ltq_dma_alloc(ch);

	local_irq_save(flags);
	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
	ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
|  | 165 |  | 
/*
 * ltq_dma_alloc_rx - set a channel up for the RX direction
 * @ch: the channel to configure
 *
 * Same as ltq_dma_alloc_tx() but without the DMA_TX direction bit
 * (same CS-window caveat applies).
 */
void
ltq_dma_alloc_rx(struct ltq_dma_channel *ch)
{
	unsigned long flags;

	ltq_dma_alloc(ch);

	local_irq_save(flags);
	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
	ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);
|  | 180 |  | 
/*
 * ltq_dma_free - close a channel and release its descriptor ring
 * @ch: the channel to tear down
 *
 * Safe to call on a channel whose ring was never allocated
 * (ch->desc_base == NULL): it returns without touching the hardware.
 */
void
ltq_dma_free(struct ltq_dma_channel *ch)
{
	if (!ch->desc_base)
		return;
	ltq_dma_close(ch);
	dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE,
		ch->desc_base, ch->phys);
}
EXPORT_SYMBOL_GPL(ltq_dma_free);
|  | 191 |  | 
/*
 * ltq_dma_init_port - apply per-port default configuration
 * @p: port id, written to the LTQ_DMA_PS select register before the
 *     port control register is programmed
 */
void
ltq_dma_init_port(int p)
{
	ltq_dma_w32(p, LTQ_DMA_PS);
	switch (p) {
	case DMA_PORT_ETOP:
		/*
		 * Tell the DMA engine to swap the endianess of data frames and
		 * drop packets if the channel arbitration fails.
		 */
		ltq_dma_w32_mask(0, DMA_ETOP_ENDIANESS | DMA_PDEN,
			LTQ_DMA_PCTRL);
		break;

	case DMA_PORT_DEU:
		/*
		 * 2-word bursts; presumably the two shifted fields are the
		 * RX/TX burst-length fields — TODO confirm against the SoC
		 * data sheet
		 */
		ltq_dma_w32((DMA_2W_BURST << 4) | (DMA_2W_BURST << 2),
			LTQ_DMA_PCTRL);
		break;

	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(ltq_dma_init_port);
|  | 216 |  | 
|  | 217 | int __init | 
|  | 218 | ltq_dma_init(void) | 
|  | 219 | { | 
|  | 220 | int i; | 
|  | 221 |  | 
|  | 222 | /* insert and request the memory region */ | 
|  | 223 | if (insert_resource(&iomem_resource, <q_dma_resource) < 0) | 
|  | 224 | panic("Failed to insert dma memory\n"); | 
|  | 225 |  | 
|  | 226 | if (request_mem_region(ltq_dma_resource.start, | 
|  | 227 | resource_size(<q_dma_resource), "dma") < 0) | 
|  | 228 | panic("Failed to request dma memory\n"); | 
|  | 229 |  | 
|  | 230 | /* remap dma register range */ | 
|  | 231 | ltq_dma_membase = ioremap_nocache(ltq_dma_resource.start, | 
|  | 232 | resource_size(<q_dma_resource)); | 
|  | 233 | if (!ltq_dma_membase) | 
|  | 234 | panic("Failed to remap dma memory\n"); | 
|  | 235 |  | 
|  | 236 | /* power up and reset the dma engine */ | 
|  | 237 | ltq_pmu_enable(PMU_DMA); | 
|  | 238 | ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL); | 
|  | 239 |  | 
|  | 240 | /* disable all interrupts */ | 
|  | 241 | ltq_dma_w32(0, LTQ_DMA_IRNEN); | 
|  | 242 |  | 
|  | 243 | /* reset/configure each channel */ | 
|  | 244 | for (i = 0; i < DMA_MAX_CHANNEL; i++) { | 
|  | 245 | ltq_dma_w32(i, LTQ_DMA_CS); | 
|  | 246 | ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL); | 
|  | 247 | ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL); | 
|  | 248 | ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); | 
|  | 249 | } | 
|  | 250 | return 0; | 
|  | 251 | } | 
|  | 252 |  | 
|  | 253 | postcore_initcall(ltq_dma_init); |