/* cyberstorm.c: Driver for CyberStorm SCSI Controller.
 *
 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
 *
 * The CyberStorm SCSI driver is based on David S. Miller's ESP driver
 * for the Sparc computers.
 *
 * This work was made possible by Phase5 who willingly (and most generously)
 * supported me with hardware and all the information I needed.
 */

/* TODO:
 *
 * 1) Figure out how to make a cleaner merge with the sparc driver with regard
 *    to the caches and the Sparc MMU mapping.
 * 2) Require as few routines as possible outside the generic driver. A lot
 *    of the routines in this file used to be inline!
 */

#include <linux/module.h>

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/interrupt.h>

#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"

#include <linux/zorro.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>

#include <asm/pgtable.h>

/* The controller registers can be found in the Z2 config area at these
 * offsets:
 */
#define CYBER_ESP_ADDR 0xf400
#define CYBER_DMA_ADDR 0xf800


/* The CyberStorm DMA interface */
struct cyber_dma_registers {
	volatile unsigned char dma_addr0;	/* DMA address (MSB) [0x000] */
	unsigned char dmapad1[1];
	volatile unsigned char dma_addr1;	/* DMA address       [0x002] */
	unsigned char dmapad2[1];
	volatile unsigned char dma_addr2;	/* DMA address       [0x004] */
	unsigned char dmapad3[1];
	volatile unsigned char dma_addr3;	/* DMA address (LSB) [0x006] */
	unsigned char dmapad4[0x3fb];
	volatile unsigned char cond_reg;	/* DMA cond    (ro)  [0x402] */
#define ctrl_reg  cond_reg			/* DMA control (wo)  [0x402] */
};

/* DMA control bits */
#define CYBER_DMA_LED    0x80	/* HD led control 1 = on */
#define CYBER_DMA_WRITE  0x40	/* DMA direction. 1 = write */
#define CYBER_DMA_Z3     0x20	/* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */

/* DMA status bits */
#define CYBER_DMA_HNDL_INTR 0x80	/* DMA IRQ pending? */

/* The bits below appear to be Phase5 debug bits only; they were not
 * described by Phase5, so using them may seem a bit stupid...
 */
#define CYBER_HOST_ID 0x02	/* If set, host ID should be 7, otherwise
				 * it should be 6.
				 */
#define CYBER_SLOW_CABLE 0x08	/* If *not* set, assume SLOW_CABLE */

static int  dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int  dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int  dma_irq_p(struct NCR_ESP *esp);
static void dma_led_off(struct NCR_ESP *esp);
static void dma_led_on(struct NCR_ESP *esp);
static int  dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);

static unsigned char ctrl_data = 0;	/* Keep a backup of what is written
					 * to ctrl_reg.  Always update this
					 * copy when writing to the hardware
					 * register, since ctrl_reg is
					 * write-only!
					 */

static volatile unsigned char cmd_buffer[16];
				/* This is where all commands are put
				 * before they are transferred to the ESP chip
				 * via PIO.
				 */

/***************************************************************** Detection */
int __init cyber_esp_detect(Scsi_Host_Template *tpnt)
{
	struct NCR_ESP *esp;
	struct zorro_dev *z = NULL;
	unsigned long address;

	while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
	    unsigned long board = z->resource.start;
	    if ((z->id == ZORRO_PROD_PHASE5_BLIZZARD_1220_CYBERSTORM ||
		 z->id == ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060) &&
		request_mem_region(board+CYBER_ESP_ADDR,
				   sizeof(struct ESP_regs), "NCR53C9x")) {
		/* Figure out if this is a CyberStorm or really a
		 * Fastlane/Blizzard Mk II by looking at the board size.
		 * CyberStorm maps 64kB
		 * (ZORRO_PROD_PHASE5_BLIZZARD_1220_CYBERSTORM does anyway)
		 */
		if(z->resource.end-board != 0xffff) {
			release_mem_region(board+CYBER_ESP_ADDR,
					   sizeof(struct ESP_regs));
			return 0;
		}
		esp = esp_allocate(tpnt, (void *)board+CYBER_ESP_ADDR);

		/* Do command transfer with programmed I/O */
		esp->do_pio_cmds = 1;

		/* Required functions */
		esp->dma_bytes_sent = &dma_bytes_sent;
		esp->dma_can_transfer = &dma_can_transfer;
		esp->dma_dump_state = &dma_dump_state;
		esp->dma_init_read = &dma_init_read;
		esp->dma_init_write = &dma_init_write;
		esp->dma_ints_off = &dma_ints_off;
		esp->dma_ints_on = &dma_ints_on;
		esp->dma_irq_p = &dma_irq_p;
		esp->dma_ports_p = &dma_ports_p;
		esp->dma_setup = &dma_setup;

		/* Optional functions */
		esp->dma_barrier = 0;
		esp->dma_drain = 0;
		esp->dma_invalidate = 0;
		esp->dma_irq_entry = 0;
		esp->dma_irq_exit = 0;
		esp->dma_led_on = &dma_led_on;
		esp->dma_led_off = &dma_led_off;
		esp->dma_poll = 0;
		esp->dma_reset = 0;

		/* SCSI chip clock frequency (40 MHz) */
		esp->cfreq = 40000000;

		/* The DMA registers on the CyberStorm are mapped
		 * relative to the device (i.e. in the same Zorro
		 * I/O block).
		 */
		address = (unsigned long)ZTWO_VADDR(board);
		esp->dregs = (void *)(address + CYBER_DMA_ADDR);

		/* ESP register base */
		esp->eregs = (struct ESP_regs *)(address + CYBER_ESP_ADDR);

		/* Set the command buffer */
		esp->esp_command = cmd_buffer;
		esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);

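		/* The board interrupts through the shared Amiga PORTS
		 * line, so the handler is registered as a shared IRQ.
		 */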
		esp->irq = IRQ_AMIGA_PORTS;
		request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
			    "CyberStorm SCSI", esp->ehost);
		/* Figure out our SCSI ID on the bus */
		/* The DMA cond register contains a hardcoded jumper bit
		 * which can be used to select host ID 6 or 7.  Even though
		 * that bit may change, we simply use a hardcoded ID of 7.
		 */
		esp->scsi_id = 7;

		/* We don't have a differential SCSI bus. */
		esp->diff = 0;

		esp_initialize(esp);

		printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
		esps_running = esps_in_use;
		return esps_in_use;
	    }
	}
	return 0;
}

/************************************************************* DMA Functions */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
	/* Since the CyberStorm DMA is fully dedicated to the ESP chip,
	 * the number of bytes sent (to the ESP chip) equals the number
	 * of bytes in the FIFO - there is no buffering in the DMA controller.
	 * XXXX Do I read this right? It is from host to ESP, right?
	 */
	return fifo_count;
}

static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
	/* I don't think there's any limit on the CyberDMA. So we use what
	 * the ESP chip can handle (24 bit).
	 */
	unsigned long sz = sp->SCp.this_residual;
	if(sz > 0x1000000)
		sz = 0x1000000;
	return sz;
}

static void dma_dump_state(struct NCR_ESP *esp)
{
	ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
		esp->esp_id, ((struct cyber_dma_registers *)
			      (esp->dregs))->cond_reg));
	ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
		custom.intreqr, custom.intenar));
}

static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct cyber_dma_registers *dregs =
		(struct cyber_dma_registers *) esp->dregs;

	cache_clear(addr, length);

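	/* The lowest address bit appears to encode the transfer direction:
	 * it is cleared here for a read and set in dma_init_write().
	 */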
	addr &= ~(1);
	dregs->dma_addr0 = (addr >> 24) & 0xff;
	dregs->dma_addr1 = (addr >> 16) & 0xff;
	dregs->dma_addr2 = (addr >>  8) & 0xff;
	dregs->dma_addr3 = (addr      ) & 0xff;
	ctrl_data &= ~(CYBER_DMA_WRITE);

	/* Check if the physical address is outside Z2 space and the
	 * transfer is block-length/block-aligned in memory.  If this is
	 * the case, enable 32 bit transfer.  In all other cases, fall back
	 * to 16 bit transfer.
	 * Obviously 32 bit transfer should be enabled if the DMA address
	 * and length are 32 bit aligned.  However, this leads to some
	 * strange behavior; even 64 bit aligned addr/length fails.
	 * Until I've found a reason for this, 32 bit transfer is only
	 * used for full-block transfers (1kB).
	 *							-jskov
	 */
#if 0
	if((addr & 0x3fc) || length & 0x3ff || ((addr > 0x200000) &&
						(addr < 0xff0000)))
		ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */
	else
		ctrl_data |= CYBER_DMA_Z3; /* CHIP/Z3, do 32 bit DMA */
#else
	ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */
#endif
	dregs->ctrl_reg = ctrl_data;
}

static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct cyber_dma_registers *dregs =
		(struct cyber_dma_registers *) esp->dregs;

	cache_push(addr, length);

	addr |= 1;
	dregs->dma_addr0 = (addr >> 24) & 0xff;
	dregs->dma_addr1 = (addr >> 16) & 0xff;
	dregs->dma_addr2 = (addr >>  8) & 0xff;
	dregs->dma_addr3 = (addr      ) & 0xff;
	ctrl_data |= CYBER_DMA_WRITE;

	/* See comment above */
#if 0
	if((addr & 0x3fc) || length & 0x3ff || ((addr > 0x200000) &&
						(addr < 0xff0000)))
		ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */
	else
		ctrl_data |= CYBER_DMA_Z3; /* CHIP/Z3, do 32 bit DMA */
#else
	ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */
#endif
	dregs->ctrl_reg = ctrl_data;
}

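/* The ESP uses the shared Amiga PORTS interrupt, so DMA interrupts are
 * masked and unmasked simply by disabling/enabling that interrupt line.
 */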
static void dma_ints_off(struct NCR_ESP *esp)
{
	disable_irq(esp->irq);
}

static void dma_ints_on(struct NCR_ESP *esp)
{
	enable_irq(esp->irq);
}

static int dma_irq_p(struct NCR_ESP *esp)
{
	/* It's important to check the DMA IRQ bit in the correct way! */
	return ((esp_read(esp->eregs->esp_status) & ESP_STAT_INTR) &&
		((((struct cyber_dma_registers *)(esp->dregs))->cond_reg) &
		 CYBER_DMA_HNDL_INTR));
}

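/* The HD activity LED is driven by a bit in the write-only DMA control
 * register, so the cached ctrl_data copy is updated and written back.
 */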
static void dma_led_off(struct NCR_ESP *esp)
{
	ctrl_data &= ~CYBER_DMA_LED;
	((struct cyber_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
}

static void dma_led_on(struct NCR_ESP *esp)
{
	ctrl_data |= CYBER_DMA_LED;
	((struct cyber_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
}

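/* Report whether the PORTS interrupt is enabled in the Amiga interrupt
 * enable register.
 */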
static int dma_ports_p(struct NCR_ESP *esp)
{
	return ((custom.intenar) & IF_PORTS);
}

static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
{
	/* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
	 * so when (write) is true, it actually means READ!
	 */
	if(write){
		dma_init_read(esp, addr, count);
	} else {
		dma_init_write(esp, addr, count);
	}
}

#define HOSTS_C

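/* Release the resources claimed in cyber_esp_detect() (host data, the
 * shared interrupt and the Zorro memory region) when built as a module.
 */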
int cyber_esp_release(struct Scsi_Host *instance)
{
#ifdef MODULE
	unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;

	esp_deallocate((struct NCR_ESP *)instance->hostdata);
	esp_release();
	release_mem_region(address, sizeof(struct ESP_regs));
	free_irq(IRQ_AMIGA_PORTS, esp_intr);
#endif
	return 1;
}


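/* SCSI host template; most operations are provided by the generic
 * NCR53C9x (ESP) core.
 */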
static Scsi_Host_Template driver_template = {
	.proc_name		= "esp-cyberstorm",
	.proc_info		= esp_proc_info,
	.name			= "CyberStorm SCSI",
	.detect			= cyber_esp_detect,
	.slave_alloc		= esp_slave_alloc,
	.slave_destroy		= esp_slave_destroy,
	.release		= cyber_esp_release,
	.queuecommand		= esp_queue,
	.eh_abort_handler	= esp_abort,
	.eh_bus_reset_handler	= esp_reset,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING
};


#include "scsi_module.c"

MODULE_LICENSE("GPL");