/*
 * sata_inic162x.c - Driver for Initio 162x SATA controllers
 *
 * Copyright 2006  SUSE Linux Products GmbH
 * Copyright 2006  Tejun Heo <teheo@novell.com>
 *
 * This file is released under GPL v2.
 *
 * This controller is eccentric and easily locks up if something isn't
 * right.  Documentation is available at Initio's website but it only
 * documents registers (not the programming model).
 *
 * - ATA disks work.
 * - Hotplug works.
 * - ATAPI read works but burning doesn't.  This thing is really
 *   peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
 *   ATAPI DMA WRITE should be programmed.  If you've got a clue, be
 *   my guest.
 * - Both STR and STD work.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

#define DRV_NAME	"sata_inic162x"
#define DRV_VERSION	"0.3"

enum {
	MMIO_BAR		= 5,

	NR_PORTS		= 2,

	HOST_CTL		= 0x7c,
	HOST_STAT		= 0x7e,
	HOST_IRQ_STAT		= 0xbc,
	HOST_IRQ_MASK		= 0xbe,

	PORT_SIZE		= 0x40,

	/* registers for ATA TF operation */
	PORT_TF			= 0x00,
	PORT_ALT_STAT		= 0x08,
	PORT_IRQ_STAT		= 0x09,
	PORT_IRQ_MASK		= 0x0a,
	PORT_PRD_CTL		= 0x0b,
	PORT_PRD_ADDR		= 0x0c,
	PORT_PRD_XFERLEN	= 0x10,

	/* IDMA register */
	PORT_IDMA_CTL		= 0x14,

	PORT_SCR		= 0x20,

	/* HOST_CTL bits */
	HCTL_IRQOFF		= (1 << 8),  /* global IRQ off */
	HCTL_PWRDWN		= (1 << 13), /* power down PHYs */
	HCTL_SOFTRST		= (1 << 13), /* global reset (no phy reset) */
	HCTL_RPGSEL		= (1 << 15), /* register page select */

	HCTL_KNOWN_BITS		= HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
				  HCTL_RPGSEL,

	/* HOST_IRQ_(STAT|MASK) bits */
	HIRQ_PORT0		= (1 << 0),
	HIRQ_PORT1		= (1 << 1),
	HIRQ_SOFT		= (1 << 14),
	HIRQ_GLOBAL		= (1 << 15), /* STAT only */

	/* PORT_IRQ_(STAT|MASK) bits */
	PIRQ_OFFLINE		= (1 << 0),  /* device unplugged */
	PIRQ_ONLINE		= (1 << 1),  /* device plugged */
	PIRQ_COMPLETE		= (1 << 2),  /* completion interrupt */
	PIRQ_FATAL		= (1 << 3),  /* fatal error */
	PIRQ_ATA		= (1 << 4),  /* ATA interrupt */
	PIRQ_REPLY		= (1 << 5),  /* reply FIFO not empty */
	PIRQ_PENDING		= (1 << 7),  /* port IRQ pending (STAT only) */

	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,

	PIRQ_MASK_DMA_READ	= PIRQ_REPLY | PIRQ_ATA,
	PIRQ_MASK_OTHER		= PIRQ_REPLY | PIRQ_COMPLETE,
	PIRQ_MASK_FREEZE	= 0xff,

	/* PORT_PRD_CTL bits */
	PRD_CTL_START		= (1 << 0),
	PRD_CTL_WR		= (1 << 3),
	PRD_CTL_DMAEN		= (1 << 7),  /* DMA enable */

	/* PORT_IDMA_CTL bits */
	IDMA_CTL_RST_ATA	= (1 << 2),  /* hardreset ATA bus */
	IDMA_CTL_RST_IDMA	= (1 << 5),  /* reset IDMA machinery */
	IDMA_CTL_GO		= (1 << 7),  /* IDMA mode go */
	IDMA_CTL_ATA_NIEN	= (1 << 8),  /* ATA IRQ disable */
};

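/* host private data: HOST_CTL value cached at probe and reused on resume */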
struct inic_host_priv {
	u16	cached_hctl;
};

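/*
 * Per-port private data: the default PRD_CTL value (DMAEN/WR/START
 * cleared) plus cached copies of PRD_CTL and the port IRQ mask to
 * avoid redundant register writes.
 */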
struct inic_port_priv {
	u8	dfl_prdctl;
	u8	cached_prdctl;
	u8	cached_pirq_mask;
};

static struct scsi_host_template inic_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const int scr_map[] = {
	[SCR_STATUS]	= 0,
	[SCR_ERROR]	= 1,
	[SCR_CONTROL]	= 2,
};

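/* return the MMIO base of the given port */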
static void __iomem *inic_port_base(struct ata_port *ap)
{
	return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
}

static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp = ap->private_data;

	writeb(mask, port_base + PORT_IRQ_MASK);
	pp->cached_pirq_mask = mask;
}

static void inic_set_pirq_mask(struct ata_port *ap, u8 mask)
{
	struct inic_port_priv *pp = ap->private_data;

	if (pp->cached_pirq_mask != mask)
		__inic_set_pirq_mask(ap, mask);
}

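/*
 * Reset a port: mask the ATA IRQ, pulse IDMA reset, clear pending
 * port IRQs, then re-enable the ATA IRQ with IDMA mode turned off.
 */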
static void inic_reset_port(void __iomem *port_base)
{
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	u16 ctl;

	ctl = readw(idma_ctl);
	ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);

	/* mask IRQ and assert reset */
	writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
	readw(idma_ctl); /* flush */

	/* give it some time */
	msleep(1);

	/* release reset */
	writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);

	/* clear irq */
	writeb(0xff, port_base + PORT_IRQ_STAT);

	/* reenable ATA IRQ, turn off IDMA mode */
	writew(ctl, idma_ctl);
}

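/*
 * SCR access goes through the per-port SCR window; scr_map translates
 * libata SCR indices into register offsets within that window.
 */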
static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
{
	void __iomem *scr_addr = ap->ioaddr.scr_addr;
	void __iomem *addr;

	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
		return -EINVAL;

	addr = scr_addr + scr_map[sc_reg] * 4;
	*val = readl(addr);

	/* this controller has stuck DIAG.N, ignore it */
	if (sc_reg == SCR_ERROR)
		*val &= ~SERR_PHYRDY_CHG;
	return 0;
}

static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
{
	void __iomem *scr_addr = ap->ioaddr.scr_addr;
	void __iomem *addr;

	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
		return -EINVAL;

	addr = scr_addr + scr_map[sc_reg] * 4;
	writel(val, addr);
	return 0;
}

/*
 * In TF mode, inic162x is very similar to an SFF device.  TF registers
 * function the same.  The DMA engine behaves similarly, using the same
 * PRD format as BMDMA, but a different command register and different
 * interrupt and event notification methods are used.  The following
 * inic_bmdma_*() functions do the impedance matching.
 */
static void inic_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);
	int rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* make sure device sees PRD table writes */
	wmb();

	/* load transfer length */
	writel(qc->nbytes, port_base + PORT_PRD_XFERLEN);

	/* turn on DMA and specify data direction */
	pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN;
	if (!rw)
		pp->cached_prdctl |= PRD_CTL_WR;
	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

static void inic_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);

	/* start host DMA transaction */
	pp->cached_prdctl |= PRD_CTL_START;
	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
}

static void inic_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);

	/* stop DMA engine */
	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
}

static u8 inic_bmdma_status(struct ata_port *ap)
{
	/* event is already verified by the interrupt handler */
	return ATA_DMA_INTR;
}

static void inic_irq_clear(struct ata_port *ap)
{
	/* noop */
}

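/*
 * Per-port interrupt handler: fetch and ack PORT_IRQ_STAT, let libata
 * complete the active command on normal completion, or record the
 * error and kick EH (freeze on hotplug events, abort otherwise).
 */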
static void inic_host_intr(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 irq_stat;

	/* fetch and clear irq */
	irq_stat = readb(port_base + PORT_IRQ_STAT);
	writeb(irq_stat, port_base + PORT_IRQ_STAT);

	if (likely(!(irq_stat & PIRQ_ERR))) {
		struct ata_queued_cmd *qc =
			ata_qc_from_tag(ap, ap->link.active_tag);

		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			ata_chk_status(ap);	/* clear ATA interrupt */
			return;
		}

		if (likely(ata_host_intr(ap, qc)))
			return;

		ata_chk_status(ap);	/* clear ATA interrupt */
		ata_port_printk(ap, KERN_WARNING, "unhandled "
				"interrupt, irq_stat=%x\n", irq_stat);
		return;
	}

	/* error */
	ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat);

	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
		ata_ehi_hotplugged(ehi);
		ata_port_freeze(ap);
	} else
		ata_port_abort(ap);
}

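/*
 * Top-level interrupt handler: check the global IRQ bit in
 * HOST_IRQ_STAT and dispatch to each port whose bit is set.
 */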
static irqreturn_t inic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *mmio_base = host->iomap[MMIO_BAR];
	u16 host_irq_stat;
	int i, handled = 0;

	host_irq_stat = readw(mmio_base + HOST_IRQ_STAT);

	if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
		goto out;

	spin_lock(&host->lock);

	for (i = 0; i < NR_PORTS; i++) {
		struct ata_port *ap = host->ports[i];

		if (!(host_irq_stat & (HIRQ_PORT0 << i)))
			continue;

		if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
			inic_host_intr(ap);
			handled++;
		} else {
			if (ata_ratelimit())
				dev_printk(KERN_ERR, host->dev, "interrupt "
					   "from disabled port %d (0x%x)\n",
					   i, host_irq_stat);
		}
	}

	spin_unlock(&host->lock);

 out:
	return IRQ_RETVAL(handled);
}

static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* ATA IRQ doesn't wait for DMA transfer completion and vice
	 * versa.  Mask IRQ selectively to detect command completion.
	 * Without it, an ATA DMA read command can cause data corruption.
	 *
	 * Something similar might be needed for ATAPI writes.  I
	 * tried a lot of combinations but couldn't find the solution.
	 */
	if (qc->tf.protocol == ATA_PROT_DMA &&
	    !(qc->tf.flags & ATA_TFLAG_WRITE))
		inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
	else
		inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);

	/* Issuing a command to a yet-uninitialized port locks up the
	 * controller.  Most of the time, this happens with the first
	 * command after reset, which is an ATA or ATAPI IDENTIFY.
	 * Fast-fail if stat is 0x7f or 0xff for those commands.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
		     qc->tf.command == ATA_CMD_ID_ATAPI)) {
		u8 stat = ata_chk_status(ap);
		if (stat == 0x7f || stat == 0xff)
			return AC_ERR_HSM;
	}

	return ata_qc_issue_prot(qc);
}

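/* freeze: mask all port IRQs and clear anything already pending */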
static void inic_freeze(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	__inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE);

	ata_chk_status(ap);
	writeb(0xff, port_base + PORT_IRQ_STAT);

	readb(port_base + PORT_IRQ_STAT); /* flush */
}

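/* thaw: clear stale status/IRQs, then restore the normal IRQ mask */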
static void inic_thaw(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	ata_chk_status(ap);
	writeb(0xff, port_base + PORT_IRQ_STAT);

	__inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);

	readb(port_base + PORT_IRQ_STAT); /* flush */
}

/*
 * SRST and SControl hardreset don't give a valid signature on this
 * controller.  Only the controller-specific hardreset mechanism works.
 */
static int inic_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_base = inic_port_base(ap);
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	u16 val;
	int rc;

	/* hammer it into sane state */
	inic_reset_port(port_base);

	val = readw(idma_ctl);
	writew(val | IDMA_CTL_RST_ATA, idma_ctl);
	readw(idma_ctl);	/* flush */
	msleep(1);
	writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);

	rc = sata_link_resume(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_WARNING, "failed to resume "
				"link after reset (errno=%d)\n", rc);
		return rc;
	}

	*class = ATA_DEV_NONE;
	if (ata_link_online(link)) {
		struct ata_taskfile tf;

		/* wait a while before checking status */
		ata_wait_after_reset(ap, deadline);

		rc = ata_wait_ready(ap, deadline);
		/* link occupied, -ENODEV too is an error */
		if (rc) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"after hardreset (errno=%d)\n", rc);
			return rc;
		}

		ata_tf_read(ap, &tf);
		*class = ata_dev_classify(&tf);
		if (*class == ATA_DEV_UNKNOWN)
			*class = ATA_DEV_NONE;
	}

	return 0;
}

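/*
 * Error handler: reset the port to stop the PIO HSM and DMA engine,
 * then hand off to standard libata EH using inic_hardreset.
 */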
static void inic_error_handler(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp = ap->private_data;
	unsigned long flags;

	/* reset PIO HSM and stop DMA engine */
	inic_reset_port(port_base);

	spin_lock_irqsave(ap->lock, flags);
	ap->hsm_task_state = HSM_ST_IDLE;
	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
	spin_unlock_irqrestore(ap->lock, flags);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_do_eh(ap, ata_std_prereset, NULL, inic_hardreset,
		  ata_std_postreset);
}

static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
{
	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		inic_reset_port(inic_port_base(qc->ap));
}

static void inic_dev_config(struct ata_device *dev)
{
	/* inic can only handle up to LBA28 max sectors */
	if (dev->max_sectors > ATA_MAX_SECTORS)
		dev->max_sectors = ATA_MAX_SECTORS;

	if (dev->n_sectors >= 1 << 28) {
		ata_dev_printk(dev, KERN_ERR,
	"ERROR: This driver doesn't support LBA48 yet and may cause\n"
	"                data corruption on such devices.  Disabling.\n");
		ata_dev_disable(dev);
	}
}

static void init_port(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	/* Setup PRD address */
	writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
}

static int inic_port_resume(struct ata_port *ap)
{
	init_port(ap);
	return 0;
}

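/*
 * Per-port init: allocate private data, record the default PRD_CTL
 * value (DMA off), allocate libata port resources and program the
 * PRD table address.
 */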
static int inic_port_start(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp;
	u8 tmp;
	int rc;

	/* alloc and initialize private data */
	pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	/* default PRD_CTL value, DMAEN, WR and START off */
	tmp = readb(port_base + PORT_PRD_CTL);
	tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START);
	pp->dfl_prdctl = tmp;

	/* alloc resources; pp is devres-managed, no need to free it on error */
	rc = ata_port_start(ap);
	if (rc)
		return rc;

	init_port(ap);

	return 0;
}

static struct ata_port_operations inic_port_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.scr_read		= inic_scr_read,
	.scr_write		= inic_scr_write,

	.bmdma_setup		= inic_bmdma_setup,
	.bmdma_start		= inic_bmdma_start,
	.bmdma_stop		= inic_bmdma_stop,
	.bmdma_status		= inic_bmdma_status,

	.irq_clear		= inic_irq_clear,
	.irq_on			= ata_irq_on,

	.qc_prep		= ata_qc_prep,
	.qc_issue		= inic_qc_issue,
	.data_xfer		= ata_data_xfer,

	.freeze			= inic_freeze,
	.thaw			= inic_thaw,
	.error_handler		= inic_error_handler,
	.post_internal_cmd	= inic_post_internal_cmd,
	.dev_config		= inic_dev_config,

	.port_resume		= inic_port_resume,

	.port_start		= inic_port_start,
};

static struct ata_port_info inic_port_info = {
	/* For some reason, ATAPI_PROT_PIO is broken on this
	 * controller, and no, PIO_POLLING doesn't fix it.  It somehow
	 * manages to report the wrong ireason, and ignoring ireason
	 * results in machine lockup.  Tell libata to always prefer
	 * DMA.
	 */
	.flags			= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask		= 0x1f,	/* pio0-4 */
	.mwdma_mask		= 0x07, /* mwdma0-2 */
	.udma_mask		= ATA_UDMA6,
	.port_ops		= &inic_port_ops
};

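/*
 * Global controller init: soft-reset the whole chip, mask and reset
 * every port, then unmask the global and per-port IRQ bits.
 */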
static int init_controller(void __iomem *mmio_base, u16 hctl)
{
	int i;
	u16 val;

	hctl &= ~HCTL_KNOWN_BITS;

	/* Soft reset whole controller.  Spec says reset duration is 3
	 * PCI clocks, be generous and give it 10ms.
	 */
	writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
	readw(mmio_base + HOST_CTL); /* flush */

	for (i = 0; i < 10; i++) {
		msleep(1);
		val = readw(mmio_base + HOST_CTL);
		if (!(val & HCTL_SOFTRST))
			break;
	}

	if (val & HCTL_SOFTRST)
		return -EIO;

	/* mask all interrupts and reset ports */
	for (i = 0; i < NR_PORTS; i++) {
		void __iomem *port_base = mmio_base + i * PORT_SIZE;

		writeb(0xff, port_base + PORT_IRQ_MASK);
		inic_reset_port(port_base);
	}

	/* port IRQ is masked now, unmask global IRQ */
	writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
	val = readw(mmio_base + HOST_IRQ_MASK);
	val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
	writew(val, mmio_base + HOST_IRQ_MASK);

	return 0;
}

#ifdef CONFIG_PM
static int inic_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct inic_host_priv *hpriv = host->private_data;
	void __iomem *mmio_base = host->iomap[MMIO_BAR];
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = init_controller(mmio_base, hpriv->cached_hctl);
		if (rc)
			return rc;
	}

	ata_host_resume(host);

	return 0;
}
#endif

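/*
 * PCI probe: allocate the host, map BARs 0-5, set up per-port taskfile
 * and SCR addresses, enforce 32-bit DMA and a reduced maximum segment
 * size, initialize the controller and activate the host.
 */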
static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
	struct ata_host *host;
	struct inic_host_priv *hpriv;
	void __iomem * const *iomap;
	int i, rc;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = iomap = pcim_iomap_table(pdev);

	for (i = 0; i < NR_PORTS; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *port = &ap->ioaddr;
		unsigned int offset = i * PORT_SIZE;

		port->cmd_addr = iomap[2 * i];
		port->altstatus_addr =
		port->ctl_addr = (void __iomem *)
			((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
		port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR;

		ata_std_ports(port);

		ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MMIO_BAR, offset, "port");
		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
		  (unsigned long long)pci_resource_start(pdev, 2 * i),
		  (unsigned long long)pci_resource_start(pdev, (2 * i + 1)) |
				      ATA_PCI_CTL_OFS);
	}

	hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);

	/* Set dma_mask.  This device doesn't support 64-bit addressing. */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit DMA enable failed\n");
		return rc;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit consistent DMA enable failed\n");
		return rc;
	}

	/*
	 * This controller is braindamaged.  dma_boundary is 0xffff
	 * like others but it will lock up the whole machine HARD if
	 * a 65536-byte PRD entry is fed.  Reduce the maximum segment size.
	 */
	rc = pci_set_dma_max_seg_size(pdev, 65536 - 512);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to set the maximum segment size.\n");
		return rc;
	}

	rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to initialize controller\n");
		return rc;
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED,
				 &inic_sht);
}

static const struct pci_device_id inic_pci_tbl[] = {
	{ PCI_VDEVICE(INIT, 0x1622), },
	{ },
};

static struct pci_driver inic_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= inic_pci_tbl,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= inic_pci_device_resume,
#endif
	.probe		= inic_init_one,
	.remove		= ata_pci_remove_one,
};

static int __init inic_init(void)
{
	return pci_register_driver(&inic_pci_driver);
}

static void __exit inic_exit(void)
{
	pci_unregister_driver(&inic_pci_driver);
}

MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(inic_init);
module_exit(inic_exit);