/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a fashion similar
 *  to other PCI IDE BMDMA controllers, with a few NV-specific details
 *  such as register offsets, SATA phy location, hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ.  Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.1"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01,	/* 0 = INT, 1 = SMI */

	/* For PCI config register 20 */
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;	/* 0 */
	u8			reserved1;	/* 1 */
	u8			ctl_flags;	/* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3 */
	u8			tag;		/* 4 */
	u8			next_cpb_idx;	/* 5 */
	__le16			reserved2;	/* 6-7 */
	__le16			tf[12];		/* 8-31 */
	struct nv_adma_prd	aprd[5];	/* 32-111 */
	__le64			next_aprd;	/* 112-119 */
	__le64			reserved3;	/* 120-127 */
};


struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	u8			flags;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
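/* With the 12-bit per-port stride above, port 0's ADMA interrupt flag sits at
   bit 19 of the general control/status word and port 1's at bit 31. */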
219
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
Jeff Garzikcca39742006-08-24 03:19:22 -0400221static void nv_ck804_host_stop(struct ata_host *host);
David Howells7d12e782006-10-05 14:55:46 +0100222static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
223static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
224static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700225static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
226static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227
Tejun Heo39f87582006-06-17 15:49:56 +0900228static void nv_nf2_freeze(struct ata_port *ap);
229static void nv_nf2_thaw(struct ata_port *ap);
230static void nv_ck804_freeze(struct ata_port *ap);
231static void nv_ck804_thaw(struct ata_port *ap);
232static void nv_error_handler(struct ata_port *ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700233static int nv_adma_slave_config(struct scsi_device *sdev);
234static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
235static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
236static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
237static void nv_adma_irq_clear(struct ata_port *ap);
238static int nv_adma_port_start(struct ata_port *ap);
239static void nv_adma_port_stop(struct ata_port *ap);
240static void nv_adma_error_handler(struct ata_port *ap);
241static void nv_adma_host_stop(struct ata_host *host);
242static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
243static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
244static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
245static u8 nv_adma_bmdma_status(struct ata_port *ap);
Tejun Heo39f87582006-06-17 15:49:56 +0900246
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247enum nv_host_type
248{
249 GENERIC,
250 NFORCE2,
Tejun Heo27e4b272006-06-17 15:49:55 +0900251 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
Robert Hancockfbbb2622006-10-27 19:08:41 -0700252 CK804,
253 ADMA
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254};
255
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VDEVICE(NVIDIA, 0x045c), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045d), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045e), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045f), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x0550), GENERIC }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0551), GENERIC }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0552), GENERIC }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0553), GENERIC }, /* MCP67 */
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_generic_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_pci_host_stop,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_nf2_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_pci_host_stop,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= nv_adma_bmdma_setup,
	.bmdma_start		= nv_adma_bmdma_start,
	.bmdma_stop		= nv_adma_bmdma_stop,
	.bmdma_status		= nv_adma_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_bmdma_stop,
	.data_xfer		= ata_mmio_data_xfer,
	.irq_handler		= nv_adma_interrupt,
	.irq_clear		= nv_adma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

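/* Load-time switch for the ADMA code paths, exposed as the "adma" module
   parameter at the bottom of this file.  When it is clear, nv_init_one()
   leaves CK804/MCP04 parts on the plain CK804 taskfile handling instead of
   upgrading them to the ADMA port_info. */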
static int adma_enabled = 1;

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;
	} else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
	}

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}

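/* Each CPB taskfile entry is a 16-bit word: the data byte in bits 7:0, the
   ATA register number in bits 12:8, and the CMDEND/WNB/IGN control bits from
   enum nv_adma_regbits in bits 15:13.  As a worked illustration (assuming the
   usual libata numbering where ATA_REG_CMD is 7), the final word for a
   READ DMA (0xC8) command would be CMDEND | (7 << 8) | 0xC8 = 0x87c8. */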
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, u16 *cpb)
{
	unsigned int idx = 0;

	cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);

	if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
	} else {
		cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
	}
	cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
	cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	return idx;
}

static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
						unsigned int port_no)
{
	mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
	return mmio;
}

static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
{
	return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
}

static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
{
	return (ap->host->mmio_base + NV_ADMA_GEN);
}

static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
{
	return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
}

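/* Per-CPB completion check: inspect the response flags for the given tag,
   fold any error bits into the matching qc's err_mask and complete it.  The
   shared ATA status register is consulted only for non-NCQ commands, since
   for NCQ it need not describe the tag that just finished. */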
static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	int complete = 0, have_err = 0;
	u16 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (flags & NV_CPB_RESP_DONE) {
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		complete = 1;
	}
	if (flags & NV_CPB_RESP_ATA_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CMD_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CPB_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (complete || force_err) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		if (likely(qc)) {
			u8 ata_status = 0;
			/* Only use the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if (qc->tf.protocol != ATA_PROT_NCQ)
				ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4));

			if (have_err || force_err)
				ata_status |= ATA_ERR;

			qc->err_mask |= ac_err_mask(ata_status);
			DPRINTK("Completing qc from tag %d with err_mask %u\n", cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		}
	}
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = nv_adma_ctl_block(ap);
			u16 status;
			u32 gen_ctl;
			int have_global_err = 0;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				struct ata_queued_cmd *qc;
				VPRINTK("in ATA register mode\n");
				qc = ata_qc_from_tag(ap, ap->active_tag);
				if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
					handled += ata_host_intr(ap, qc);
				else {
					/* No request pending?  Clear interrupt status
					   anyway, in case there's one pending. */
					ap->ops->check_status(ap);
					handled++;
				}
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);

			gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);

			/* Seems necessary to clear notifiers even when they were 0.
			   Otherwise we seem to stop receiving further interrupts.
			   Unsure why. */
			writel(notifier | notifier_error, nv_adma_notifier_clear_block(ap));

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status.  Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			/* freeze if hotplugged */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
				ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
				ata_port_freeze(ap);
				handled++;
				continue;
			}

			if (status & NV_ADMA_STAT_TIMEOUT) {
				ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if (status & NV_ADMA_STAT_CPBERR) {
				ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
				/* check CPBs for completed commands */

				if (ata_tag_valid(ap->active_tag))
					/* Non-NCQ command */
					nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
						(notifier_error & (1 << ap->active_tag)));
				else {
					int pos;
					u32 active = ap->sactive;
					while ((pos = ffs(active))) {
						pos--;
						nv_adma_check_cpb(ap, pos, have_global_err ||
							(notifier_error & (1 << pos)));
						active &= ~(1 << pos);
					}
				}
			}

			handled++; /* irq handled if we got here */
		}
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       nv_adma_notifier_clear_block(ap));

	/* clear legacy status */
	ap->flags &= ~ATA_FLAG_MMIO;
	ata_bmdma_irq_clear(ap);
	ap->flags |= ATA_FLAG_MMIO;
}

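/* Legacy BMDMA wrappers used while a port is in register mode.  ATA_FLAG_MMIO
   is dropped around the generic ata_bmdma_* calls below (and in
   nv_adma_irq_clear() above) so that those helpers use their port-I/O rather
   than MMIO accessors for the legacy BMDMA registers -- an inference drawn
   from the flag toggling in this driver, not from NVIDIA documentation. */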
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
		WARN_ON(1);
		return;
	}

	qc->ap->flags &= ~ATA_FLAG_MMIO;
	ata_bmdma_setup(qc);
	qc->ap->flags |= ATA_FLAG_MMIO;
}

static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
		WARN_ON(1);
		return;
	}

	qc->ap->flags &= ~ATA_FLAG_MMIO;
	ata_bmdma_start(qc);
	qc->ap->flags |= ATA_FLAG_MMIO;
}

static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	qc->ap->flags &= ~ATA_FLAG_MMIO;
	ata_bmdma_stop(qc);
	qc->ap->flags |= ATA_FLAG_MMIO;
}

static u8 nv_adma_bmdma_status(struct ata_port *ap)
{
	u8 status;
	struct nv_adma_port_priv *pp = ap->private_data;

	WARN_ON(pp->flags & NV_ADMA_PORT_REGISTER_MODE);

	ap->flags &= ~ATA_FLAG_MMIO;
	status = ata_bmdma_status(ap);
	ap->flags |= ATA_FLAG_MMIO;
	return status;
}

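/* The ADMA engine has two per-port operating modes, selected by the
   NV_ADMA_CTL_GO bit: register mode (GO clear), where the port is driven
   through the usual taskfile/BMDMA interface, and ADMA mode (GO set), where
   the controller executes CPBs appended via NV_ADMA_APPEND.
   nv_adma_qc_issue() switches between the two per command. */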
static void nv_adma_register_mode(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	struct nv_adma_port_priv *pp = ap->private_data;
	u16 tmp;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	struct nv_adma_port_priv *pp = ap->private_data;
	u16 tmp;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

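/* Per-port DMA memory layout set up below (NV_ADMA_PORT_PRIV_DMA_SZ bytes in
   total): 32 CPBs of 128 bytes each, followed by 32 scatter/gather tables of
   NV_ADMA_SGTBL_LEN (56) entries of 16 bytes -- one CPB and one table per
   command tag. */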
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio = nv_adma_ctl_block(ap);
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		rc = -ENOMEM;
		goto err_out;
	}

	mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				 &mem_dma, GFP_KERNEL);

	if (!mem) {
		rc = -ENOMEM;
		goto err_out_kfree;
	}
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;

err_out_kfree:
	kfree(pp);
err_out:
	ata_port_stop(ap);
	return rc;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = nv_adma_ctl_block(ap);

	VPRINTK("ENTER\n");

	writew(0, mmio + NV_ADMA_CTL);

	ap->private_data = NULL;
	dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
	kfree(pp);
	ata_port_stop(ap);
}


static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->mmio_base;
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= (unsigned long) mmio;
	ioport->data_addr	= (unsigned long) mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= (unsigned long) mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= (unsigned long) mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= (unsigned long) mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= (unsigned long) mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= (unsigned long) mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= (unsigned long) mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= (unsigned long) mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= (unsigned long) mmio + 0x20;
}

static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	for (i = 0; i < probe_ent->n_ports; i++) {
		void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
		u16 tmp;

		/* enable interrupt, clear reset if not already clear */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
	}

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u32 flags;

	memset(aprd, 0, sizeof(struct nv_adma_prd));

	flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = cpu_to_le32(flags);
}

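/* The first five S/G segments go into the APRD slots embedded in the CPB
   itself; any further segments land in this port's external APRD table at
   entry offset NV_ADMA_SGTBL_LEN * tag, and next_aprd is pointed at that
   table's bus address. */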
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
}

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_APRD_VALID |
		       NV_CPB_CTL_IEN;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
		ata_qc_prep(qc);
		return;
	}

	memset(cpb, 0, sizeof(struct nv_adma_cpb));

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	nv_adma_fill_sg(qc, cpb);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *mmio = nv_adma_ctl_block(qc->ap);

	VPRINTK("ENTER\n");

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
		/* use ATA register mode */
		VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();
	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				/* No request pending?  Clear interrupt status
				   anyway, in case there's one pending. */
				ap->ops->check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
	int handled;

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	handled = ata_host_intr(ap, qc);
	if (unlikely(!handled)) {
		/* spurious, clear it */
		ata_check_status(ap);
	}

	return 1;
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static u32 nv_scr_read(struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = nv_adma_ctl_block(ap);
		int i;
		u16 tmp;

		u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
		u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
		u32 status = readw(mmio + NV_ADMA_STAT);

		ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
			"notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
			notifier, notifier_error, gen_ctl, status);

		for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
			struct nv_adma_cpb *cpb = &pp->cpb[i];
			if (cpb->ctl_flags || cpb->resp_flags)
				ata_port_printk(ap, KERN_ERR,
					"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
					i, cpb->ctl_flags, cpb->resp_flags);
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		ata_port_printk(ap, KERN_ERR, "Resetting port\n");

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

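/* Probe path: pick the port_info selected by the PCI table, upgrade
   CK804/MCP04 parts to ADMA when the module parameter allows it (trying a
   64-bit DMA mask in that case), map BAR5 for the SCR/ADMA registers, enable
   the extended SATA register space, and register both ports with libata. */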
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_port_info *ppi[2];
	struct ata_probe_ent *probe_ent;
	int pci_dev_busy = 0;
	int rc;
	u32 bar;
	unsigned long base;
	unsigned long type = ent->driver_data;
	int mask_set = 0;

	/* Make sure this is a SATA controller by counting the number of bars
	   (NVIDIA SATA controllers will always have six bars).  Otherwise,
	   it's an IDE controller and we ignore it. */
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out_disable;
	}

	if (type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
		if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
		    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			mask_set = 1;
	}

	if (!mask_set) {
		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			goto err_out_regions;
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			goto err_out_regions;
	}

	rc = -ENOMEM;

	ppi[0] = ppi[1] = &nv_port_info[type];
	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		goto err_out_regions;

	probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
	if (!probe_ent->mmio_base) {
		rc = -EIO;
		goto err_out_free_ent;
	}

	base = (unsigned long)probe_ent->mmio_base;

	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	pci_set_master(pdev);

	if (type == ADMA) {
		rc = nv_adma_host_init(probe_ent);
		if (rc)
			goto err_out_iounmap;
	}

	rc = ata_device_add(probe_ent);
	if (rc != NV_PORTS)
		goto err_out_iounmap;

	kfree(probe_ent);

	return 0;

err_out_iounmap:
	pci_iounmap(pdev, probe_ent->mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
err_out:
	return rc;
}

static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);

	ata_pci_host_stop(host);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int i;
	u32 tmp32;

	for (i = 0; i < host->n_ports; i++) {
		void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
		u16 tmp;

		/* disable interrupt */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
	}

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
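
/* Usage note (illustration, not from the original sources): the ADMA paths
   can be disabled at load time with e.g. "modprobe sata_nv adma=0", in which
   case CK804/MCP04 controllers are driven through the plain CK804 code only. */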