/*
 * linux/drivers/ide/arm/icside.c
 *
 * Copyright (c) 1996-2004 Russell King.
 *
 * Please note that this platform does not support 32-bit IDE IO.
 */

#include <linux/string.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/scatterlist.h>

#include <asm/dma.h>
#include <asm/ecard.h>
#include <asm/io.h>

#define ICS_IDENT_OFFSET		0x2280

#define ICS_ARCIN_V5_INTRSTAT		0x0000
#define ICS_ARCIN_V5_INTROFFSET		0x0004
#define ICS_ARCIN_V5_IDEOFFSET		0x2800
#define ICS_ARCIN_V5_IDEALTOFFSET	0x2b80
#define ICS_ARCIN_V5_IDESTEPPING	6

#define ICS_ARCIN_V6_IDEOFFSET_1	0x2000
#define ICS_ARCIN_V6_INTROFFSET_1	0x2200
#define ICS_ARCIN_V6_INTRSTAT_1		0x2290
#define ICS_ARCIN_V6_IDEALTOFFSET_1	0x2380
#define ICS_ARCIN_V6_IDEOFFSET_2	0x3000
#define ICS_ARCIN_V6_INTROFFSET_2	0x3200
#define ICS_ARCIN_V6_INTRSTAT_2		0x3290
#define ICS_ARCIN_V6_IDEALTOFFSET_2	0x3380
#define ICS_ARCIN_V6_IDESTEPPING	6

struct cardinfo {
	unsigned int dataoffset;
	unsigned int ctrloffset;
	unsigned int stepping;
};

static struct cardinfo icside_cardinfo_v5 = {
	.dataoffset	= ICS_ARCIN_V5_IDEOFFSET,
	.ctrloffset	= ICS_ARCIN_V5_IDEALTOFFSET,
	.stepping	= ICS_ARCIN_V5_IDESTEPPING,
};

static struct cardinfo icside_cardinfo_v6_1 = {
	.dataoffset	= ICS_ARCIN_V6_IDEOFFSET_1,
	.ctrloffset	= ICS_ARCIN_V6_IDEALTOFFSET_1,
	.stepping	= ICS_ARCIN_V6_IDESTEPPING,
};

static struct cardinfo icside_cardinfo_v6_2 = {
	.dataoffset	= ICS_ARCIN_V6_IDEOFFSET_2,
	.ctrloffset	= ICS_ARCIN_V6_IDEALTOFFSET_2,
	.stepping	= ICS_ARCIN_V6_IDESTEPPING,
};

struct icside_state {
	unsigned int channel;
	unsigned int enabled;
	void __iomem *irq_port;
	void __iomem *ioc_base;
	unsigned int type;
	/* parent device... until the IDE core gets one of its own */
	struct device *dev;
	ide_hwif_t *hwif[2];
};

#define ICS_TYPE_A3IN	0
#define ICS_TYPE_A3USER	1
#define ICS_TYPE_V6	3
#define ICS_TYPE_V5	15
#define ICS_TYPE_NOTYPE	((unsigned int)-1)

/* ---------------- Version 5 PCB Support Functions --------------------- */
/* Prototype: icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose  : enable interrupts from card
 */
static void icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;

	writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}

/* Prototype: icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose  : disable interrupts from card
 */
static void icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;

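	/*
	 * A read of this location appears to mask the card interrupt,
	 * mirroring the write of 0 that unmasks it in
	 * icside_irqenable_arcin_v5() (inferred from the enable/disable
	 * pairing above, not from a datasheet).
	 */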
	readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}

static const expansioncard_ops_t icside_ops_arcin_v5 = {
	.irqenable	= icside_irqenable_arcin_v5,
	.irqdisable	= icside_irqdisable_arcin_v5,
};


/* ---------------- Version 6 PCB Support Functions --------------------- */
/* Prototype: icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose  : enable interrupts from card
 */
static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;
	void __iomem *base = state->irq_port;

	state->enabled = 1;

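	/*
	 * Unmask the interrupt for the channel currently in use and mask
	 * the other one; both interfaces share a single card interrupt
	 * (see icside_maskproc() below for the same routing).
	 */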
	switch (state->channel) {
	case 0:
		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
		readb(base + ICS_ARCIN_V6_INTROFFSET_2);
		break;
	case 1:
		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
		readb(base + ICS_ARCIN_V6_INTROFFSET_1);
		break;
	}
}

/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose  : disable interrupts from card
 */
static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;

	state->enabled = 0;

	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
}

/* Prototype: icside_irqprobe(struct expansion_card *ec)
 * Purpose  : detect an active interrupt from card
 */
static int icside_irqpending_arcin_v6(struct expansion_card *ec)
{
	struct icside_state *state = ec->irq_data;

	return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
	       readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
}

static const expansioncard_ops_t icside_ops_arcin_v6 = {
	.irqenable	= icside_irqenable_arcin_v6,
	.irqdisable	= icside_irqdisable_arcin_v6,
	.irqpending	= icside_irqpending_arcin_v6,
};

/*
 * Handle routing of interrupts.  This is called before
 * we write the command to the drive.
 */
static void icside_maskproc(ide_drive_t *drive, int mask)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct icside_state *state = hwif->hwif_data;
	unsigned long flags;

	local_irq_save(flags);

	state->channel = hwif->channel;

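	/*
	 * When interrupts are enabled and we are not masking, route the
	 * IRQ to the channel about to be used; otherwise mask both
	 * channels.
	 */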
	if (state->enabled && !mask) {
		switch (hwif->channel) {
		case 0:
			writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
			readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
			break;
		case 1:
			writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
			readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
			break;
		}
	} else {
		readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
		readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
/*
 * SG-DMA support.
 *
 * Similar to the BM-DMA, but we use the RiscPC's IOMD DMA controllers.
 * There is only one DMA controller per card, which means that only
 * one drive can be accessed at one time.  NOTE! We do not enforce that
 * here, but we rely on the main IDE driver spotting that both
 * interfaces use the same IRQ, which should guarantee this.
 */

static void icside_build_sglist(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct icside_state *state = hwif->hwif_data;
	struct scatterlist *sg = hwif->sg_table;

	ide_map_sg(drive, rq);

	if (rq_data_dir(rq) == READ)
		hwif->sg_dma_direction = DMA_FROM_DEVICE;
	else
		hwif->sg_dma_direction = DMA_TO_DEVICE;

	hwif->sg_nents = dma_map_sg(state->dev, sg, hwif->sg_nents,
				    hwif->sg_dma_direction);
}

/*
 * Configure the IOMD to give the appropriate timings for the transfer
 * mode being requested.  We take the advice of the ATA standards, and
 * calculate the cycle time based on the transfer mode, and the EIDE
 * MW DMA specs that the drive provides in the IDENTIFY command.
 *
 * We have the following IOMD DMA modes to choose from:
 *
 *	Type	Active		Recovery	Cycle
 *	A	250 (250)	312 (550)	562 (800)
 *	B	187		250		437
 *	C	125 (125)	125 (375)	250 (500)
 *	D	62		125		187
 *
 * (figures in brackets are actual measured timings)
 *
 * However, we also need to take care of the read/write active and
 * recovery timings:
 *
 *			Read	Write
 *	Mode	Active	-- Recovery --	Cycle	IOMD type
 *	MW0	215	50	215	480	A
 *	MW1	80	50	50	150	C
 *	MW2	70	25	25	120	C
 */
static int icside_set_speed(ide_drive_t *drive, u8 xfer_mode)
{
	int on = 0, cycle_time = 0, use_dma_info = 0;

	/*
	 * Limit the transfer speed to MW_DMA_2.
	 */
	if (xfer_mode > XFER_MW_DMA_2)
		xfer_mode = XFER_MW_DMA_2;

	switch (xfer_mode) {
	case XFER_MW_DMA_2:
		cycle_time = 250;
		use_dma_info = 1;
		break;

	case XFER_MW_DMA_1:
		cycle_time = 250;
		use_dma_info = 1;
		break;

	case XFER_MW_DMA_0:
		cycle_time = 480;
		break;

	case XFER_SW_DMA_2:
	case XFER_SW_DMA_1:
	case XFER_SW_DMA_0:
		cycle_time = 480;
		break;
	}

	/*
	 * If we're going to be doing MW_DMA_1 or MW_DMA_2, we should
	 * take care to note the values in the ID...
	 */
	if (use_dma_info && drive->id->eide_dma_time > cycle_time)
		cycle_time = drive->id->eide_dma_time;

	drive->drive_data = cycle_time;

	if (cycle_time && ide_config_drive_speed(drive, xfer_mode) == 0)
		on = 1;
	else
		drive->drive_data = 480;

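	/*
	 * Each DMA cycle transfers 16 bits (2 bytes), so a cycle time of
	 * drive_data nanoseconds gives a peak rate of 2000 / drive_data
	 * MB/s.
	 */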
	printk("%s: %s selected (peak %dMB/s)\n", drive->name,
		ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data);

	drive->current_speed = xfer_mode;

	return on;
}

static void icside_dma_host_off(ide_drive_t *drive)
{
}

static void icside_dma_off_quietly(ide_drive_t *drive)
{
	drive->using_dma = 0;
}

static void icside_dma_host_on(ide_drive_t *drive)
{
}

static int icside_dma_on(ide_drive_t *drive)
{
	drive->using_dma = 1;

	return 0;
}

static int icside_dma_check(ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;
	ide_hwif_t *hwif = HWIF(drive);
	int xfer_mode = XFER_PIO_2;
	int on;

	if (!(id->capability & 1) || !hwif->autodma)
		goto out;

	/*
	 * Consult the list of known "bad" drives
	 */
	if (__ide_dma_bad_drive(drive))
		goto out;

	/*
	 * Enable DMA on any drive that has multiword DMA
	 */
	if (id->field_valid & 2) {
		xfer_mode = ide_dma_speed(drive, 0);
		goto out;
	}

	/*
	 * Consult the list of known "good" drives
	 */
	if (__ide_dma_good_drive(drive)) {
		if (id->eide_dma_time > 150)
			goto out;
		xfer_mode = XFER_MW_DMA_1;
	}

out:
	on = icside_set_speed(drive, xfer_mode);

	return on ? 0 : -1;
}

static int icside_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct icside_state *state = hwif->hwif_data;

	drive->waiting_for_dma = 0;

	disable_dma(hwif->hw.dma);

	/* Teardown mappings after DMA has completed. */
	dma_unmap_sg(state->dev, hwif->sg_table, hwif->sg_nents,
		     hwif->sg_dma_direction);

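	/*
	 * A non-zero residue means the controller stopped before the
	 * whole scatterlist was transferred; report that as an error.
	 */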
	return get_dma_residue(hwif->hw.dma) != 0;
}

static void icside_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	/* We can not enable DMA on both channels simultaneously. */
	BUG_ON(dma_channel_active(hwif->hw.dma));
	enable_dma(hwif->hw.dma);
}

static int icside_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq = hwif->hwgroup->rq;
	unsigned int dma_mode;

	if (rq_data_dir(rq))
		dma_mode = DMA_MODE_WRITE;
	else
		dma_mode = DMA_MODE_READ;

	/*
	 * We can not enable DMA on both channels.
	 */
	BUG_ON(dma_channel_active(hwif->hw.dma));

	icside_build_sglist(drive, rq);

	/*
	 * Ensure that we have the right interrupt routed.
	 */
	icside_maskproc(drive, 0);

	/*
	 * Route the DMA signals to the correct interface.
	 */
	writeb(hwif->select_data, hwif->config_data);

	/*
	 * Select the correct timing for this drive.
	 */
	set_dma_speed(hwif->hw.dma, drive->drive_data);

	/*
	 * Tell the DMA engine about the SG table and
	 * data direction.
	 */
	set_dma_sg(hwif->hw.dma, hwif->sg_table, hwif->sg_nents);
	set_dma_mode(hwif->hw.dma, dma_mode);

	drive->waiting_for_dma = 1;

	return 0;
}

static void icside_dma_exec_cmd(ide_drive_t *drive, u8 cmd)
{
	/* issue cmd to drive */
	ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD, NULL);
}

static int icside_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct icside_state *state = hwif->hwif_data;

	return readb(state->irq_port +
		     (hwif->channel ?
			ICS_ARCIN_V6_INTRSTAT_2 :
			ICS_ARCIN_V6_INTRSTAT_1)) & 1;
}

static int icside_dma_timeout(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

	if (icside_dma_test_irq(drive))
		return 0;

	ide_dump_status(drive, "DMA timeout",
			HWIF(drive)->INB(IDE_STATUS_REG));

	return icside_dma_end(drive);
}

static int icside_dma_lostirq(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: IRQ lost\n", drive->name);
	return 1;
}

static void icside_dma_init(ide_hwif_t *hwif)
{
	printk("    %s: SG-DMA", hwif->name);

	hwif->atapi_dma		= 1;
	hwif->mwdma_mask	= 7; /* MW0..2 */
	hwif->swdma_mask	= 7; /* SW0..2 */

	hwif->dmatable_cpu	= NULL;
	hwif->dmatable_dma	= 0;
	hwif->speedproc		= icside_set_speed;
	hwif->autodma		= 1;

	hwif->ide_dma_check	= icside_dma_check;
	hwif->dma_host_off	= icside_dma_host_off;
	hwif->dma_off_quietly	= icside_dma_off_quietly;
	hwif->dma_host_on	= icside_dma_host_on;
	hwif->ide_dma_on	= icside_dma_on;
	hwif->dma_setup		= icside_dma_setup;
	hwif->dma_exec_cmd	= icside_dma_exec_cmd;
	hwif->dma_start		= icside_dma_start;
	hwif->ide_dma_end	= icside_dma_end;
	hwif->ide_dma_test_irq	= icside_dma_test_irq;
	hwif->ide_dma_timeout	= icside_dma_timeout;
	hwif->ide_dma_lostirq	= icside_dma_lostirq;

	hwif->drives[0].autodma = hwif->autodma;
	hwif->drives[1].autodma = hwif->autodma;

	printk(" capable%s\n", hwif->autodma ? ", auto-enable" : "");
}
#else
#define icside_dma_init(hwif)	(0)
#endif

static ide_hwif_t *icside_find_hwif(unsigned long dataport)
{
	ide_hwif_t *hwif;
	int index;

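	/*
	 * Prefer an interface already set up at this data port; failing
	 * that, take the first unused hwif slot.
	 */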
	for (index = 0; index < MAX_HWIFS; ++index) {
		hwif = &ide_hwifs[index];
		if (hwif->io_ports[IDE_DATA_OFFSET] == dataport)
			goto found;
	}

	for (index = 0; index < MAX_HWIFS; ++index) {
		hwif = &ide_hwifs[index];
		if (!hwif->io_ports[IDE_DATA_OFFSET])
			goto found;
	}

	hwif = NULL;
found:
	return hwif;
}

static ide_hwif_t *
icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *ec)
{
	unsigned long port = (unsigned long)base + info->dataoffset;
	ide_hwif_t *hwif;

	hwif = icside_find_hwif(port);
	if (hwif) {
		int i;

		memset(&hwif->hw, 0, sizeof(hw_regs_t));

		/*
		 * Ensure we're using MMIO
		 */
		default_hwif_mmiops(hwif);
		hwif->mmio = 1;

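		/*
		 * Taskfile registers are spaced 1 << stepping bytes apart
		 * (64 bytes here, since both card types use a stepping of 6).
		 */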
		for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
			hwif->hw.io_ports[i] = port;
			hwif->io_ports[i] = port;
			port += 1 << info->stepping;
		}
		hwif->hw.io_ports[IDE_CONTROL_OFFSET] = (unsigned long)base + info->ctrloffset;
		hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)base + info->ctrloffset;
		hwif->hw.irq  = ec->irq;
		hwif->irq     = ec->irq;
		hwif->noprobe = 0;
		hwif->chipset = ide_acorn;
		hwif->gendev.parent = &ec->dev;
	}

	return hwif;
}

static int __init
icside_register_v5(struct icside_state *state, struct expansion_card *ec)
{
	ide_hwif_t *hwif;
	void __iomem *base;

	base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
		       ecard_resource_len(ec, ECARD_RES_MEMC));
	if (!base)
		return -ENOMEM;

	state->irq_port = base;

	ec->irqaddr  = base + ICS_ARCIN_V5_INTRSTAT;
	ec->irqmask  = 1;
	ec->irq_data = state;
	ec->ops      = &icside_ops_arcin_v5;

	/*
	 * Be on the safe side - disable interrupts
	 */
	icside_irqdisable_arcin_v5(ec, 0);

	hwif = icside_setup(base, &icside_cardinfo_v5, ec);
	if (!hwif) {
		iounmap(base);
		return -ENODEV;
	}

	state->hwif[0] = hwif;

	probe_hwif_init(hwif);
	create_proc_ide_interfaces();

	return 0;
}

static int __init
icside_register_v6(struct icside_state *state, struct expansion_card *ec)
{
	ide_hwif_t *hwif, *mate;
	void __iomem *ioc_base, *easi_base;
	unsigned int sel = 0;
	int ret;

	ioc_base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCFAST),
			   ecard_resource_len(ec, ECARD_RES_IOCFAST));
	if (!ioc_base) {
		ret = -ENOMEM;
		goto out;
	}

	easi_base = ioc_base;

	if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
		easi_base = ioremap(ecard_resource_start(ec, ECARD_RES_EASI),
				    ecard_resource_len(ec, ECARD_RES_EASI));
		if (!easi_base) {
			ret = -ENOMEM;
			goto unmap_slot;
		}

		/*
		 * Enable access to the EASI region.
		 */
		sel = 1 << 5;
	}

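	/*
	 * The register at the base of the fast IOC region appears to hold
	 * the EASI/ROM selection and interface routing bits; see also the
	 * writes in icside_dma_setup() and icside_remove().
	 */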
	writeb(sel, ioc_base);

	ec->irq_data      = state;
	ec->ops           = &icside_ops_arcin_v6;

	state->irq_port   = easi_base;
	state->ioc_base   = ioc_base;

	/*
	 * Be on the safe side - disable interrupts
	 */
	icside_irqdisable_arcin_v6(ec, 0);

	/*
	 * Find and register the interfaces.
	 */
	hwif = icside_setup(easi_base, &icside_cardinfo_v6_1, ec);
	mate = icside_setup(easi_base, &icside_cardinfo_v6_2, ec);

	if (!hwif || !mate) {
		ret = -ENODEV;
		goto unmap_port;
	}

	state->hwif[0]    = hwif;
	state->hwif[1]    = mate;

	hwif->maskproc    = icside_maskproc;
	hwif->channel     = 0;
	hwif->hwif_data   = state;
	hwif->mate        = mate;
	hwif->serialized  = 1;
	hwif->config_data = (unsigned long)ioc_base;
	hwif->select_data = sel;
	hwif->hw.dma      = ec->dma;

	mate->maskproc    = icside_maskproc;
	mate->channel     = 1;
	mate->hwif_data   = state;
	mate->mate        = hwif;
	mate->serialized  = 1;
	mate->config_data = (unsigned long)ioc_base;
	mate->select_data = sel | 1;
	mate->hw.dma      = ec->dma;

	if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) {
		icside_dma_init(hwif);
		icside_dma_init(mate);
	}

	probe_hwif_init(hwif);
	probe_hwif_init(mate);
	create_proc_ide_interfaces();

	return 0;

unmap_port:
	if (easi_base != ioc_base)
		iounmap(easi_base);
unmap_slot:
	iounmap(ioc_base);
out:
	return ret;
}

static int __devinit
icside_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	struct icside_state *state;
	void __iomem *idmem;
	int ret;

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	state = kmalloc(sizeof(struct icside_state), GFP_KERNEL);
	if (!state) {
		ret = -ENOMEM;
		goto release;
	}

	memset(state, 0, sizeof(struct icside_state));
	state->type	= ICS_TYPE_NOTYPE;
	state->dev	= &ec->dev;

	idmem = ioremap(ecard_resource_start(ec, ECARD_RES_IOCFAST),
			ecard_resource_len(ec, ECARD_RES_IOCFAST));
	if (idmem) {
		unsigned int type;

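		/*
		 * The card type is encoded one bit per location, in bit 0
		 * of every fourth byte from ICS_IDENT_OFFSET.
		 */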
		type = readb(idmem + ICS_IDENT_OFFSET) & 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
		iounmap(idmem);

		state->type = type;
	}

	switch (state->type) {
	case ICS_TYPE_A3IN:
		dev_warn(&ec->dev, "A3IN unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_A3USER:
		dev_warn(&ec->dev, "A3USER unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_V5:
		ret = icside_register_v5(state, ec);
		break;

	case ICS_TYPE_V6:
		ret = icside_register_v6(state, ec);
		break;

	default:
		dev_warn(&ec->dev, "unknown interface type\n");
		ret = -ENODEV;
		break;
	}

	if (ret == 0) {
		ecard_set_drvdata(ec, state);
		goto out;
	}

	kfree(state);
release:
	ecard_release_resources(ec);
out:
	return ret;
}

static void __devexit icside_remove(struct expansion_card *ec)
{
	struct icside_state *state = ecard_get_drvdata(ec);

	switch (state->type) {
	case ICS_TYPE_V5:
		/* FIXME: tell IDE to stop using the interface */

		/* Disable interrupts */
		icside_irqdisable_arcin_v5(ec, 0);
		break;

	case ICS_TYPE_V6:
		/* FIXME: tell IDE to stop using the interface */
		if (ec->dma != NO_DMA)
			free_dma(ec->dma);

		/* Disable interrupts */
		icside_irqdisable_arcin_v6(ec, 0);

		/* Reset the ROM pointer/EASI selection */
		writeb(0, state->ioc_base);
		break;
	}

	ecard_set_drvdata(ec, NULL);
	ec->ops = NULL;
	ec->irq_data = NULL;

	if (state->ioc_base)
		iounmap(state->ioc_base);
	if (state->ioc_base != state->irq_port)
		iounmap(state->irq_port);

	kfree(state);
	ecard_release_resources(ec);
}

static void icside_shutdown(struct expansion_card *ec)
{
	struct icside_state *state = ecard_get_drvdata(ec);
	unsigned long flags;

	/*
	 * Disable interrupts from this card.  We need to do
	 * this before disabling EASI since we may be accessing
	 * this register via that region.
	 */
	local_irq_save(flags);
	ec->ops->irqdisable(ec, 0);
	local_irq_restore(flags);

	/*
	 * Reset the ROM pointer so that we can read the ROM
	 * after a soft reboot.  This also disables access to
	 * the IDE taskfile via the EASI region.
	 */
	if (state->ioc_base)
		writeb(0, state->ioc_base);
}

static const struct ecard_id icside_ids[] = {
	{ MANU_ICS,  PROD_ICS_IDE  },
	{ MANU_ICS2, PROD_ICS2_IDE },
	{ 0xffff, 0xffff }
};

static struct ecard_driver icside_driver = {
	.probe		= icside_probe,
	.remove		= __devexit_p(icside_remove),
	.shutdown	= icside_shutdown,
	.id_table	= icside_ids,
	.drv = {
		.name	= "icside",
	},
};

static int __init icside_init(void)
{
	return ecard_register_driver(&icside_driver);
}

MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ICS IDE driver");

module_init(icside_init);