/* sun_esp.c: ESP front-end for Sparc SBUS systems.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <asm/sbus.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"sun_esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"1.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))

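/* Locate the DVMA controller paired with this ESP.  If the probe code
 * already identified an "espdma"/"dma" parent node we match that
 * sbus_dev directly; otherwise we scan the DVMA chain for either an
 * unclaimed controller or one in the same SBUS bus/slot whose OBP name
 * is "dma" or "espdma".
 */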
static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sdev)
{
	struct sbus_dev *sdev = esp->dev;
	struct sbus_dma *dma;

	if (dma_sdev != NULL) {
		for_each_dvma(dma) {
			if (dma->sdev == dma_sdev)
				break;
		}
	} else {
		for_each_dvma(dma) {
			if (dma->sdev == NULL)
				break;

			/* If bus + slot are the same and it has the
			 * correct OBP name, it's ours.
			 */
			if (sdev->bus == dma->sdev->bus &&
			    sdev->slot == dma->sdev->slot &&
			    (!strcmp(dma->sdev->prom_name, "dma") ||
			     !strcmp(dma->sdev->prom_name, "espdma")))
				break;
		}
	}

	if (dma == NULL) {
		printk(KERN_ERR PFX "[%s] Cannot find dma.\n",
		       sdev->ofdev.node->full_name);
		return -ENODEV;
	}
	esp->dma = dma;
	esp->dma_regs = dma->regs;

	return 0;
}

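/* Map the ESP chip's register bank.  On HME (FAS366) the SBUS device
 * exports two register sets and the ESP registers live in the second
 * one; on all other variants they are in the first.
 */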
static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
{
	struct sbus_dev *sdev = esp->dev;
	struct resource *res;

	/* On HME, two reg sets exist, first is DVMA,
	 * second is ESP registers.
	 */
	if (hme)
		res = &sdev->resource[1];
	else
		res = &sdev->resource[0];

	esp->regs = sbus_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
	if (!esp->regs)
		return -ENOMEM;

	return 0;
}

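/* Allocate the 16-byte DMA-coherent buffer that the shared esp_scsi
 * core uses as its command block when DMA'ing command bytes to the
 * chip.
 */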
static int __devinit esp_sbus_map_command_block(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;

	esp->command_block = sbus_alloc_consistent(sdev, 16,
						   &esp->command_block_dma);
	if (!esp->command_block)
		return -ENOMEM;
	return 0;
}

static int __devinit esp_sbus_register_irq(struct esp *esp)
{
	struct Scsi_Host *host = esp->host;
	struct sbus_dev *sdev = esp->dev;

	host->irq = sdev->irqs[0];
	return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}

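/* Determine our SCSI initiator ID.  Prefer the per-device
 * "initiator-id" property, then "scsi-initiator-id" on the device
 * node, then the same property on the parent SBUS bus node, and
 * finally fall back to the traditional default of 7.
 */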
static void __devinit esp_get_scsi_id(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp = sdev->ofdev.node;

	esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	if (!sdev->bus) {
		/* SUN4 */
		esp->scsi_id = 7;
		goto done;
	}

	esp->scsi_id = of_getintprop_default(sdev->bus->ofdev.node,
					     "scsi-initiator-id", 7);

done:
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
}

static void __devinit esp_get_differential(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp = sdev->ofdev.node;

	if (of_find_property(dp, "differential", NULL))
		esp->flags |= ESP_FLAG_DIFFERENTIAL;
	else
		esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}

static void __devinit esp_get_clock_params(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp = sdev->ofdev.node;
	struct device_node *bus_dp;
	int fmhz;

	bus_dp = NULL;
	if (sdev != NULL && sdev->bus != NULL)
		bus_dp = sdev->bus->ofdev.node;

	fmhz = of_getintprop_default(dp, "clock-frequency", 0);
	if (fmhz == 0)
		fmhz = (!bus_dp) ? 0 :
			of_getintprop_default(bus_dp, "clock-frequency", 0);

	esp->cfreq = fmhz;
}

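/* Compute the set of burst sizes we may use.  Start from the ESP
 * node's "burst-sizes" mask and intersect it with the masks reported
 * by the DVMA node and the parent SBUS bus node, when present.  If the
 * result looks unusable (no property anywhere, or 16-byte or 32-byte
 * bursts missing) fall back to the default mask of bursts below 32
 * bytes.
 */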
static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma)
{
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp = sdev->ofdev.node;
	u8 bursts;

	bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
	if (dma) {
		struct device_node *dma_dp = dma->ofdev.node;
		u8 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
		if (val != 0xff)
			bursts &= val;
	}

	if (sdev->bus) {
		u8 val = of_getintprop_default(sdev->bus->ofdev.node,
					       "burst-sizes", 0xff);
		if (val != 0xff)
			bursts &= val;
	}

	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	esp->bursts = bursts;
}

static void __devinit esp_sbus_get_props(struct esp *esp, struct sbus_dev *espdma)
{
	esp_get_scsi_id(esp);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}

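/* ESP registers are byte-wide but sit on 4-byte boundaries on SBUS,
 * hence the "reg * 4" scaling in the accessors below.
 */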
static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	sbus_writeb(val, esp->regs + (reg * 4UL));
}

static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
{
	return sbus_readb(esp->regs + (reg * 4UL));
}

static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
				      size_t sz, int dir)
{
	return sbus_map_single(esp->dev, buf, sz, dir);
}

static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
			   int num_sg, int dir)
{
	return sbus_map_sg(esp->dev, sg, num_sg, dir);
}

static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
				  size_t sz, int dir)
{
	sbus_unmap_single(esp->dev, addr, sz, dir);
}

static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
			      int num_sg, int dir)
{
	sbus_unmap_sg(esp->dev, sg, num_sg, dir);
}

static int sbus_esp_irq_pending(struct esp *esp)
{
	if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
		return 1;
	return 0;
}

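/* Reset the DVMA engine and program it for the burst sizes and SBUS
 * capabilities detected at probe time.  The exact sequence depends on
 * the DVMA revision: HME (FAS366) keeps a software copy of the CSR in
 * esp->prev_hme_dmacsr, while the older rev2/rev3/ESC1 engines are
 * adjusted in place.
 */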
static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	u32 val;

	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	if (sbus_can_dma_64bit(esp->dev))
		can_do_sbus64 = 1;
	if (sbus_can_burst64(esp->dev))
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state. */
	if (esp->dma->revision != dvmahme) {
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dma->revision) {
	case dvmahme:
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);

		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);

		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;

		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(esp->dev, esp->bursts);
		}

		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		dma_write32(0, DMA_ADDR);
		break;

	case dvmarev2:
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;

	case dvmarev3:
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;

	case dvmaesc1:
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16) {
			val |= DMA_ESC_BURST;
		} else {
			val &= ~(DMA_ESC_BURST);
		}
		dma_write32(val, DMA_CSR);
		break;

	default:
		break;
	}

	/* Enable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}

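/* Wait for the DVMA FIFO to drain out to memory.  HME needs no drain
 * at all, and only revisions other than rev3 and ESC1 require the
 * explicit DMA_FIFO_STDRAIN kick before we poll.
 */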
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dma->revision == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	if (esp->dma->revision != dvmarev3 && esp->dma->revision != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}

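/* Throw away whatever is sitting in the DVMA pipeline.  On HME this is
 * done by resetting the SCSI channel and reprogramming the cached CSR;
 * on the older engines we wait for any pending read to finish and then
 * pulse DMA_FIFO_INV.
 */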
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dma->revision == dvmahme) {
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}

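/* Program a DMA transfer and issue the accompanying ESP command.  The
 * transfer length is loaded into the chip's transfer counter (with the
 * extended FAS_RLO/FAS_RHI registers on FASHME), the DVMA is pointed
 * at the buffer, and the direction bit follows 'write'.  On FASHME the
 * ESP command is issued before the DVMA is re-enabled; on the older
 * revisions the DVMA is set up first and the command issued last.  For
 * dvmaesc1 the count written to DMA_COUNT is rounded up to the page
 * boundary past the end of the buffer.
 */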
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dma->revision == dvmaesc1) {
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}

static int sbus_esp_dma_error(struct esp *esp)
{
	u32 csr = dma_read32(DMA_CSR);

	if (csr & DMA_HNDL_ERROR)
		return 1;

	return 0;
}

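/* Hooks handed to the shared esp_scsi core: register access, DMA
 * mapping, and DVMA control are all routed through the SBUS-specific
 * helpers above.
 */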
static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	=	sbus_esp_write8,
	.esp_read8	=	sbus_esp_read8,
	.map_single	=	sbus_esp_map_single,
	.map_sg		=	sbus_esp_map_sg,
	.unmap_single	=	sbus_esp_unmap_single,
	.unmap_sg	=	sbus_esp_unmap_sg,
	.irq_pending	=	sbus_esp_irq_pending,
	.reset_dma	=	sbus_esp_reset_dma,
	.dma_drain	=	sbus_esp_dma_drain,
	.dma_invalidate	=	sbus_esp_dma_invalidate,
	.send_dma_cmd	=	sbus_esp_send_dma_cmd,
	.dma_error	=	sbus_esp_dma_error,
};

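/* Allocate the Scsi_Host, wire up the DVMA, map registers, set up the
 * command block and IRQ, read the OBP properties, and finally hand the
 * host to the esp_scsi core.  The error paths unwind in reverse order.
 */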
static int __devinit esp_sbus_probe_one(struct device *dev,
					struct sbus_dev *esp_dev,
					struct sbus_dev *espdma,
					struct sbus_bus *sbus,
					int hme)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	int err;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = (hme ? 16 : 8);
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = esp_dev;
	esp->ops = &sbus_esp_ops;

	if (hme)
		esp->flags |= ESP_FLAG_WIDE_CAPABLE;

	err = esp_sbus_find_dma(esp, espdma);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_regs(esp, hme);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_command_block(esp);
	if (err < 0)
		goto fail_unmap_regs;

	err = esp_sbus_register_irq(esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp_sbus_get_props(esp, espdma);

	/* Before we try to touch the ESP chip, ESC1 dma can
	 * come up with the reset bit set, so make sure that
	 * is clear first.
	 */
	if (esp->dma->revision == dvmaesc1) {
		u32 val = dma_read32(DMA_CSR);

		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}

	dev_set_drvdata(&esp_dev->ofdev.dev, esp);

	err = scsi_esp_register(esp, dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	sbus_free_consistent(esp->dev, 16,
			     esp->command_block,
			     esp->command_block_dma);
fail_unmap_regs:
	sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}

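/* OF match entry point.  If our parent node is an "espdma" or "dma"
 * node, that parent is the DVMA controller to pair with; a node named
 * "SUNW,fas" is the HME onboard FAS366, whose DVMA registers live on
 * the same sbus_dev as the ESP itself.
 */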
static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match)
{
	struct sbus_dev *sdev = to_sbus_device(&dev->dev);
	struct device_node *dp = dev->node;
	struct sbus_dev *dma_sdev = NULL;
	int hme = 0;

	if (dp->parent &&
	    (!strcmp(dp->parent->name, "espdma") ||
	     !strcmp(dp->parent->name, "dma")))
		dma_sdev = sdev->parent;
	else if (!strcmp(dp->name, "SUNW,fas")) {
		dma_sdev = sdev;
		hme = 1;
	}

	return esp_sbus_probe_one(&dev->dev, sdev, dma_sdev,
				  sdev->bus, hme);
}

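/* Teardown mirrors probe: unregister from the esp_scsi core, quiesce
 * DVMA interrupts, then release the IRQ, command block, register
 * mapping, and host reference.
 */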
static int __devexit esp_sbus_remove(struct of_device *dev)
{
	struct esp *esp = dev_get_drvdata(&dev->dev);
	unsigned int irq = esp->host->irq;
	u32 val;

	scsi_esp_unregister(esp);

	/* Disable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

	free_irq(irq, esp);
	sbus_free_consistent(esp->dev, 16,
			     esp->command_block,
			     esp->command_block_dma);
	sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);

	scsi_host_put(esp->host);

	return 0;
}

static struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);

static struct of_platform_driver esp_sbus_driver = {
	.name		= "esp",
	.match_table	= esp_match,
	.probe		= esp_sbus_probe,
	.remove		= __devexit_p(esp_sbus_remove),
};

static int __init sunesp_init(void)
{
	return of_register_driver(&esp_sbus_driver, &sbus_bus_type);
}

static void __exit sunesp_exit(void)
{
	of_unregister_driver(&esp_sbus_driver);
}

MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(sunesp_init);
module_exit(sunesp_exit);