blob: 75b76535c7202dc11ef41f5fec5537551d98eb74 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
Jeff Garzikaa7e16d2005-08-29 15:12:56 -04008 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
Linus Torvalds1da177e2005-04-16 15:20:36 -070021 *
Jeff Garzikaf36d7f2005-08-28 20:18:39 -040022 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
Robert Hancockfbbb2622006-10-27 19:08:41 -070032 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070037 */
38
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050046#include <linux/device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <scsi/scsi_host.h>
Robert Hancockfbbb2622006-10-27 19:08:41 -070048#include <scsi/scsi_device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <linux/libata.h>
50
51#define DRV_NAME "sata_nv"
Jeff Garzik2a3103c2007-08-31 04:54:06 -040052#define DRV_VERSION "3.5"
Robert Hancockfbbb2622006-10-27 19:08:41 -070053
54#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
/* Controller constants: BAR layout, per-chip interrupt registers, PCI
 * config bits, ADMA register map and the flag bits used throughout the
 * driver.  All register offsets are relative to BAR5 (NV_MMIO_BAR)
 * unless noted otherwise.
 */
enum {
	NV_MMIO_BAR			= 5,	/* PCI BAR holding the MMIO register space */

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE registers (nforce2/3 vs. CK804 locations) */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,	/* hotplug: device attached */
	NV_INT_REMOVED			= 0x08,	/* hotplug: device detached */

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	/* interrupts we actually service; NV_INT_PM is deliberately excluded */
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, /* 0 = INT, 1 = SMI */

	/* For PCI config register 20 */
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	/* ADMA command/PRD geometry */
	NV_ADMA_MAX_CPBS		= 32,	/* CPBs per port, one per queued command */
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	/* APRDs that fit in the 1K slot remaining after the embedded CPB */
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	/* +5 for the APRDs stored inside the CPB itself */
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space  */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers (offsets within a port's register space) */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags (nv_adma_port_priv.flags) */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;		/* bus address of the segment */
	__le32			len;		/* segment length in bytes */
	u8			flags;		/* NV_APRD_* bits */
	u8			packet_len;
	__le16			reserved;
};
211
/* Bits encoded into the 16-bit taskfile words of a CPB (nv_adma_cpb.tf). */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
221
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 — NV_CPB_RESP_* completion status */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 — NV_CPB_CTL_* control bits */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3  */
	u8			tag;           /* 4 — NCQ tag for this command */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 — taskfile words, see enum nv_adma_regbits */
	struct nv_adma_prd	aprd[5];       /* 32-111 — first 5 SG segments, inline */
	__le64			next_aprd;     /* 112-119 — bus address of overflow APRD table */
	__le64			reserved3;     /* 120-127 */
};
240
241
/* Per-port private state for the ADMA (CK804/MCP04) interface. */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* CPB ring (NV_ADMA_MAX_CPBS entries) */
	dma_addr_t		cpb_dma;	/* bus address of the CPB ring */
	struct nv_adma_prd	*aprd;		/* overflow APRD tables */
	dma_addr_t		aprd_dma;	/* bus address of the APRD tables */
	void __iomem		*ctl_block;	/* this port's ADMA registers */
	void __iomem		*gen_block;	/* shared ADMA general registers */
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;
	u8			flags;		/* NV_ADMA_PORT_REGISTER_MODE etc. */
	int			last_issue_ncq;	/* whether the last issued command was NCQ */
};
254
/* Host-wide private data: records which nv_host_type this controller is. */
struct nv_host_priv {
	unsigned long		type;
};
258
/* Fixed-size FIFO of deferred command tags, used by the SWNCQ path. */
struct defer_queue {
	u32		defer_bits;		/* bitmap of tags currently queued */
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];	/* circular buffer of tags */
};
265
/* Flags recording which FIS types have been observed during an NCQ
 * transaction (stored in nv_swncq_port_priv.ncq_flags). */
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),	/* D2H register FIS seen */
	ncq_saw_dmas	= (1U << 1),	/* DMA setup FIS seen */
	ncq_saw_sdb	= (1U << 2),	/* Set Device Bits FIS seen */
	ncq_saw_backout	= (1U << 3),
};
272
/* Per-port private state for the software-NCQ (MCP51/MCP55) interface. */
struct nv_swncq_port_priv {
	struct ata_prd	*prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;	/* SActive register */
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;	/* bitmap of in-flight commands */

	unsigned int	last_issue_tag;

	/* fifo circular queue to store deferral command */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32		dhfis_bits;	/* tags that received a D2H register FIS */
	u32		dmafis_bits;	/* tags that received a DMA setup FIS */
	u32		sdbfis_bits;	/* tags completed via SDB FIS */

	unsigned int	ncq_flags;	/* enum ncq_saw_flag_list bits */
};
293
294
/* Test the per-port ADMA interrupt bit for PORT (0 or 1) in the general
 * control/status word GCTL; ports' bits are 12 apart starting at bit 19. */
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
Robert Hancockfbbb2622006-10-27 19:08:41 -0700296
/* --- PCI driver entry points --- */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);

/* --- per-chip interrupt handlers and SControl register access --- */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

/* --- freeze/thaw and error handling for the legacy interfaces --- */
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);

/* --- ADMA (CK804/MCP04) interface --- */
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

/* --- software NCQ (MCP51/MCP55) interface --- */
static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif
346
/* Controller flavours; also used as indices into nv_port_info[]. */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	SWNCQ,
};
356
/* PCI IDs handled by this driver; the driver_data field selects the
 * nv_host_type (and thus the nv_port_info[] entry) for each device. */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ } /* terminate list */
};
375
/* PCI driver registration; suspend uses the generic libata helper while
 * resume needs chip-specific re-initialization (nv_pci_device_resume). */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};
386
/* SCSI host template for the legacy (non-ADMA, non-SWNCQ) interfaces;
 * entirely standard libata settings. */
static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
404
/* SCSI host template for ADMA-capable controllers: deeper queue (one CPB
 * per command), larger SG table and a wider DMA boundary, plus a custom
 * slave_configure that switches DMA limits for ATAPI devices. */
static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
423
/* SCSI host template for software-NCQ controllers: NCQ-depth queue with
 * the standard BMDMA SG limits and a custom slave_configure. */
static struct scsi_host_template nv_swncq_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= ATA_MAX_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
442
/* Port operations for GENERIC controllers: stock BMDMA helpers plus the
 * driver's SControl accessors and error handler. */
static const struct ata_port_operations nv_generic_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};
466
/* Port operations for nforce2/3: as nv_generic_ops but with chip-specific
 * freeze/thaw that mask the NF2 interrupt registers. */
static const struct ata_port_operations nv_nf2_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};
490
/* Port operations for CK804/MCP04 running in legacy mode: CK804-specific
 * freeze/thaw and a host_stop that restores the chip's register mapping. */
static const struct ata_port_operations nv_ck804_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};
515
/* Port operations for the ADMA interface: custom qc_prep/qc_issue build
 * CPBs, custom error handling and PM hooks, with BMDMA helpers retained
 * for commands sent through the legacy path (e.g. ATAPI). */
static const struct ata_port_operations nv_adma_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= nv_adma_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};
547
/* Port operations for software NCQ on MCP51/MCP55: custom qc_prep/qc_issue
 * implement NCQ in the driver over the BMDMA engine; MCP55-specific
 * freeze/thaw and a dedicated error handler. */
static const struct ata_port_operations nv_swncq_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,
	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};
576
/* Per-flavour port configuration, indexed by enum nv_host_type (NFORCE3
 * aliases NFORCE2, so there are five entries for six names). */
static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.irq_handler	= nv_generic_interrupt,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.irq_handler	= nv_nf2_interrupt,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.irq_handler	= nv_ck804_interrupt,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.irq_handler	= nv_adma_interrupt,
	},
	/* SWNCQ */
	{
		.sht		= &nv_swncq_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.irq_handler	= nv_swncq_interrupt,
	},
};
631
/* Module metadata; the device table enables autoloading via PCI IDs. */
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
637
/* ADMA is used by default on capable chips; SWNCQ is opt-in (defaults to 0).
 * NOTE(review): these look like module-parameter backing variables —
 * confirm the module_param() declarations later in the file. */
static int adma_enabled = 1;
static int swncq_enabled;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700640
Robert Hancock2dec7552006-11-26 14:20:19 -0600641static void nv_adma_register_mode(struct ata_port *ap)
642{
Robert Hancock2dec7552006-11-26 14:20:19 -0600643 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600644 void __iomem *mmio = pp->ctl_block;
Robert Hancocka2cfe812007-02-05 16:26:03 -0800645 u16 tmp, status;
646 int count = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600647
648 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
649 return;
650
Robert Hancocka2cfe812007-02-05 16:26:03 -0800651 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400652 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
Robert Hancocka2cfe812007-02-05 16:26:03 -0800653 ndelay(50);
654 status = readw(mmio + NV_ADMA_STAT);
655 count++;
656 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400657 if (count == 20)
Robert Hancocka2cfe812007-02-05 16:26:03 -0800658 ata_port_printk(ap, KERN_WARNING,
659 "timeout waiting for ADMA IDLE, stat=0x%hx\n",
660 status);
661
Robert Hancock2dec7552006-11-26 14:20:19 -0600662 tmp = readw(mmio + NV_ADMA_CTL);
663 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
664
Robert Hancocka2cfe812007-02-05 16:26:03 -0800665 count = 0;
666 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400667 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
Robert Hancocka2cfe812007-02-05 16:26:03 -0800668 ndelay(50);
669 status = readw(mmio + NV_ADMA_STAT);
670 count++;
671 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400672 if (count == 20)
Robert Hancocka2cfe812007-02-05 16:26:03 -0800673 ata_port_printk(ap, KERN_WARNING,
674 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
675 status);
676
Robert Hancock2dec7552006-11-26 14:20:19 -0600677 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
678}
679
680static void nv_adma_mode(struct ata_port *ap)
681{
Robert Hancock2dec7552006-11-26 14:20:19 -0600682 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600683 void __iomem *mmio = pp->ctl_block;
Robert Hancocka2cfe812007-02-05 16:26:03 -0800684 u16 tmp, status;
685 int count = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600686
687 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
688 return;
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500689
Robert Hancock2dec7552006-11-26 14:20:19 -0600690 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
691
692 tmp = readw(mmio + NV_ADMA_CTL);
693 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
694
Robert Hancocka2cfe812007-02-05 16:26:03 -0800695 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400696 while (((status & NV_ADMA_STAT_LEGACY) ||
Robert Hancocka2cfe812007-02-05 16:26:03 -0800697 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
698 ndelay(50);
699 status = readw(mmio + NV_ADMA_STAT);
700 count++;
701 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400702 if (count == 20)
Robert Hancocka2cfe812007-02-05 16:26:03 -0800703 ata_port_printk(ap, KERN_WARNING,
704 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
705 status);
706
Robert Hancock2dec7552006-11-26 14:20:19 -0600707 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
708}
709
/*
 *	nv_adma_slave_config - configure SCSI device DMA limits for ADMA
 *	@sdev: SCSI device being configured
 *
 *	Called when a SCSI device is attached to the port.  For ATAPI
 *	devices the port is forced into legacy (register) mode, ADMA is
 *	disabled in PCI config space, and 32-bit DMA restrictions are
 *	applied; otherwise full ADMA limits are used.  Because both ports
 *	share one PCI function, the DMA mask and bounce limits must be
 *	kept consistent across both ports.
 *
 *	Returns the result of ata_scsi_slave_config().
 */
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	/* port flags and shared PCI config are examined/updated under lock */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		/* non-ATAPI device: full ADMA scatter/gather limits apply */
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	/* each port has its own enable + posted-write-buffer enable bits */
	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	/* avoid a config-space write when nothing changed */
	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	sdev0 = ap->host->ports[0]->link.device[0].sdev;
	sdev1 = ap->host->ports[1]->link.device[0].sdev;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/** We have to set the DMA mask to 32-bit if either port is in
		    ATAPI mode, since they are on the same PCI device which is
		    used for DMA mapping. If we set the mask we also need to set
		    the bounce limit on both ports to ensure that the block
		    layer doesn't feed addresses that cause DMA mapping to
		    choke. If either SCSI device is not allocated yet, it's OK
		    since that port will discover its correct setting when it
		    does get allocated.
		    Note: Setting 32-bit mask should not fail. */
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       ATA_DMA_MASK);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       ATA_DMA_MASK);

		pci_set_dma_mask(pdev, ATA_DMA_MASK);
	} else {
		/** This shouldn't fail as it was set to this value before */
		pci_set_dma_mask(pdev, pp->adma_dma_mask);
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       pp->adma_dma_mask);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)*ap->host->dev->dma_mask,
		segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
819
Robert Hancock2dec7552006-11-26 14:20:19 -0600820static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
821{
822 struct nv_adma_port_priv *pp = qc->ap->private_data;
823 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
824}
825
/*
 *	nv_adma_tf_read - read the taskfile, dropping back to register mode
 *	@ap: port to read from
 *	@tf: taskfile to fill in
 *
 *	The shadow registers are only accessible in register mode, so the
 *	port is switched out of ADMA mode before the standard read.
 */
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_tf_read(ap, tf);
}
839
Robert Hancock2dec7552006-11-26 14:20:19 -0600840static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
Robert Hancockfbbb2622006-10-27 19:08:41 -0700841{
842 unsigned int idx = 0;
843
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400844 if (tf->flags & ATA_TFLAG_ISADDR) {
Robert Hancockac3d6b82007-02-19 19:02:46 -0600845 if (tf->flags & ATA_TFLAG_LBA48) {
846 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
847 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
848 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
849 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
850 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
851 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
852 } else
853 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
Jeff Garzika84471f2007-02-26 05:51:33 -0500854
Robert Hancockac3d6b82007-02-19 19:02:46 -0600855 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
856 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
857 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
858 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700859 }
Jeff Garzika84471f2007-02-26 05:51:33 -0500860
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400861 if (tf->flags & ATA_TFLAG_DEVICE)
Robert Hancockac3d6b82007-02-19 19:02:46 -0600862 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700863
864 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
Jeff Garzika84471f2007-02-26 05:51:33 -0500865
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400866 while (idx < 12)
Robert Hancockac3d6b82007-02-19 19:02:46 -0600867 cpb[idx++] = cpu_to_le16(IGN);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700868
869 return idx;
870}
871
/*
 *	nv_adma_check_cpb - examine one CPB for completion or error
 *	@ap: port owning the CPB
 *	@cpb_num: CPB index (equals the command tag)
 *	@force_err: nonzero if the error notifier flagged this CPB
 *
 *	On error, classifies the failure into the EH info, then freezes or
 *	aborts the port.  On clean completion, completes the matching qc.
 *
 *	Returns 1 if the port was frozen/aborted (the caller must stop
 *	scanning further CPBs), 0 otherwise.
 */
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			/* device reported an error - EH can retry/analyze */
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			/* controller-side failure - freeze for a hard reset */
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR,
					"notifier for tag %d with no cmd?\n",
					cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}
934
/*
 *	nv_host_intr - handle a legacy (non-ADMA) interrupt for one port
 *	@ap: port that may have interrupted
 *	@irq_stat: CK804 interrupt status bits, already shifted for this port
 *
 *	Returns nonzero if the interrupt was handled (including freezing
 *	the port on hotplug events), zero if it was not ours.
 */
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		/* reading status acknowledges the spurious interrupt */
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}
958
/*
 *	nv_adma_interrupt - interrupt handler for ADMA-capable controllers
 *	@irq: irq number (unused)
 *	@dev_instance: the ata_host this interrupt belongs to
 *
 *	For each port: dispatch to the legacy handler if ADMA is disabled
 *	or the port is in register mode, then process the ADMA notifier and
 *	status registers, freezing the port on hotplug/controller errors
 *	and completing finished CPBs otherwise.  Notifier clear registers
 *	for both ports are written together at the end, as required by the
 *	hardware.
 */
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->link.active_tag))
					/** NV_INT_DEV indication seems unreliable at times
					    at least in ADMA mode. Force it on always when a
					    command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			/* remember what to acknowledge once both ports are scanned */
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->link.eh_info;

				ata_ehi_clear_desc(ehi);
				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, "timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, "SError");
				} else
					ata_ehi_push_desc(ehi, "unknown");
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR |
				      NV_ADMA_STAT_CMD_COMPLETE)) {
				u32 check_commands = notifier_clears[i];
				int pos, error = 0;

				if (status & NV_ADMA_STAT_CPBERR) {
					/* Check all active commands */
					if (ata_tag_valid(ap->link.active_tag))
						check_commands = 1 <<
							ap->link.active_tag;
					else
						check_commands = ap->
							link.sactive;
				}

				/** Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1087
Robert Hancock53014e22007-05-05 15:36:36 -06001088static void nv_adma_freeze(struct ata_port *ap)
1089{
1090 struct nv_adma_port_priv *pp = ap->private_data;
1091 void __iomem *mmio = pp->ctl_block;
1092 u16 tmp;
1093
1094 nv_ck804_freeze(ap);
1095
1096 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1097 return;
1098
1099 /* clear any outstanding CK804 notifications */
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001100 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
Robert Hancock53014e22007-05-05 15:36:36 -06001101 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1102
1103 /* Disable interrupt */
1104 tmp = readw(mmio + NV_ADMA_CTL);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001105 writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
Robert Hancock53014e22007-05-05 15:36:36 -06001106 mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001107 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancock53014e22007-05-05 15:36:36 -06001108}
1109
1110static void nv_adma_thaw(struct ata_port *ap)
1111{
1112 struct nv_adma_port_priv *pp = ap->private_data;
1113 void __iomem *mmio = pp->ctl_block;
1114 u16 tmp;
1115
1116 nv_ck804_thaw(ap);
1117
1118 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1119 return;
1120
1121 /* Enable interrupt */
1122 tmp = readw(mmio + NV_ADMA_CTL);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001123 writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
Robert Hancock53014e22007-05-05 15:36:36 -06001124 mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001125 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancock53014e22007-05-05 15:36:36 -06001126}
1127
Robert Hancockfbbb2622006-10-27 19:08:41 -07001128static void nv_adma_irq_clear(struct ata_port *ap)
1129{
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001130 struct nv_adma_port_priv *pp = ap->private_data;
1131 void __iomem *mmio = pp->ctl_block;
Robert Hancock53014e22007-05-05 15:36:36 -06001132 u32 notifier_clears[2];
1133
1134 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1135 ata_bmdma_irq_clear(ap);
1136 return;
1137 }
1138
1139 /* clear any outstanding CK804 notifications */
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001140 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
Robert Hancock53014e22007-05-05 15:36:36 -06001141 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001142
1143 /* clear ADMA status */
Robert Hancock53014e22007-05-05 15:36:36 -06001144 writew(0xffff, mmio + NV_ADMA_STAT);
Jeff Garzika617c092007-05-21 20:14:23 -04001145
Robert Hancock53014e22007-05-05 15:36:36 -06001146 /* clear notifiers - note both ports need to be written with
1147 something even though we are only clearing on one */
1148 if (ap->port_no == 0) {
1149 notifier_clears[0] = 0xFFFFFFFF;
1150 notifier_clears[1] = 0;
1151 } else {
1152 notifier_clears[0] = 0;
1153 notifier_clears[1] = 0xFFFFFFFF;
1154 }
1155 pp = ap->host->ports[0]->private_data;
1156 writel(notifier_clears[0], pp->notifier_clear_block);
1157 pp = ap->host->ports[1]->private_data;
1158 writel(notifier_clears[1], pp->notifier_clear_block);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001159}
1160
/*
 *	nv_adma_post_internal_cmd - cleanup after an internal command
 *	@qc: command that finished
 *
 *	Only the legacy BMDMA engine needs post-command cleanup; when the
 *	port is in ADMA mode there is nothing to do.
 */
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}
1168
/*
 *	nv_adma_port_start - allocate and initialize per-port ADMA state
 *	@ap: port to set up
 *
 *	Allocates the port private data and the coherent DMA block that
 *	holds the CPB array followed by the s/g (APRD) tables, maps the
 *	port's ADMA register blocks, and brings the engine up in register
 *	mode with interrupts enabled after a channel reset.
 *
 *	The DMA mask is deliberately set to 32-bit *before* ata_port_start()
 *	(which allocates the legacy PRD/pad buffers) and only raised to
 *	64-bit afterwards for the CPB/APRD allocation.
 *
 *	Returns 0 on success, -ENOMEM or the failing helper's code on error.
 */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	/* devm allocation - freed automatically on detach */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* per-port control block, general block, and notifier clear slot */
	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used to set as the bounce limit in slave_config later if
	   needed. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb = mem;
	pp->cpb_dma = mem_dma;

	/* tell the controller where the CPB array lives (64-bit split) */
	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse channel reset to bring the engine to a known state */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
1266
1267static void nv_adma_port_stop(struct ata_port *ap)
1268{
Robert Hancockfbbb2622006-10-27 19:08:41 -07001269 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001270 void __iomem *mmio = pp->ctl_block;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001271
1272 VPRINTK("ENTER\n");
Robert Hancockfbbb2622006-10-27 19:08:41 -07001273 writew(0, mmio + NV_ADMA_CTL);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001274}
1275
Tejun Heo438ac6d2007-03-02 17:31:26 +09001276#ifdef CONFIG_PM
/*
 *	nv_adma_port_suspend - quiesce the ADMA engine for suspend
 *	@ap: port being suspended
 *	@mesg: PM message (unused)
 *
 *	Drops the port back to register mode, resets the CPB fetch count
 *	and shuts the engine down by clearing the control register.
 *	Always returns 0.
 */
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}
1293
/*
 *	nv_adma_port_resume - re-initialize the ADMA engine after resume
 *	@ap: port being resumed
 *
 *	Restores the CPB base address and repeats the register-mode
 *	initialization sequence from port_start: clear status, reset the
 *	fetch count, enable interrupts and pulse a channel reset.
 *	Always returns 0.
 */
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location (64-bit address split across two regs) */
	writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse channel reset to bring the engine to a known state */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
Tejun Heo438ac6d2007-03-02 17:31:26 +09001327#endif
Robert Hancockfbbb2622006-10-27 19:08:41 -07001328
Tejun Heo9a829cc2007-04-17 23:44:08 +09001329static void nv_adma_setup_port(struct ata_port *ap)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001330{
Tejun Heo9a829cc2007-04-17 23:44:08 +09001331 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1332 struct ata_ioports *ioport = &ap->ioaddr;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001333
1334 VPRINTK("ENTER\n");
1335
Tejun Heo9a829cc2007-04-17 23:44:08 +09001336 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001337
Tejun Heo0d5ff562007-02-01 15:06:36 +09001338 ioport->cmd_addr = mmio;
1339 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001340 ioport->error_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001341 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1342 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1343 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1344 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1345 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1346 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001347 ioport->status_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001348 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001349 ioport->altstatus_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001350 ioport->ctl_addr = mmio + 0x20;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001351}
1352
Tejun Heo9a829cc2007-04-17 23:44:08 +09001353static int nv_adma_host_init(struct ata_host *host)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001354{
Tejun Heo9a829cc2007-04-17 23:44:08 +09001355 struct pci_dev *pdev = to_pci_dev(host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001356 unsigned int i;
1357 u32 tmp32;
1358
1359 VPRINTK("ENTER\n");
1360
1361 /* enable ADMA on the ports */
1362 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1363 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1364 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1365 NV_MCP_SATA_CFG_20_PORT1_EN |
1366 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1367
1368 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1369
Tejun Heo9a829cc2007-04-17 23:44:08 +09001370 for (i = 0; i < host->n_ports; i++)
1371 nv_adma_setup_port(host->ports[i]);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001372
Robert Hancockfbbb2622006-10-27 19:08:41 -07001373 return 0;
1374}
1375
1376static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1377 struct scatterlist *sg,
1378 int idx,
1379 struct nv_adma_prd *aprd)
1380{
Robert Hancock41949ed2007-02-19 19:02:27 -06001381 u8 flags = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001382 if (qc->tf.flags & ATA_TFLAG_WRITE)
1383 flags |= NV_APRD_WRITE;
1384 if (idx == qc->n_elem - 1)
1385 flags |= NV_APRD_END;
1386 else if (idx != 4)
1387 flags |= NV_APRD_CONT;
1388
1389 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1390 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
Robert Hancock2dec7552006-11-26 14:20:19 -06001391 aprd->flags = flags;
Robert Hancock41949ed2007-02-19 19:02:27 -06001392 aprd->packet_len = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001393}
1394
/* Build the APRD chain for @qc.  The first five scatterlist elements go
 * into the APRDs embedded in the CPB itself; any further elements go into
 * this tag's slot of the external APRD table, linked via next_aprd.
 */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		/* first 5 entries live inline in the CPB, the rest spill
		   into the per-tag external APRD table */
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	/* point the CPB at the external table only if it was used */
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
1414
Robert Hancock382a6652007-02-05 16:26:02 -08001415static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1416{
1417 struct nv_adma_port_priv *pp = qc->ap->private_data;
1418
1419 /* ADMA engine can only be used for non-ATAPI DMA commands,
Robert Hancock3f3debd2007-11-25 16:59:36 -06001420 or interrupt-driven no-data commands. */
Jeff Garzikb4479162007-10-25 20:47:30 -04001421 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
Robert Hancock3f3debd2007-11-25 16:59:36 -06001422 (qc->tf.flags & ATA_TFLAG_POLLING))
Robert Hancock382a6652007-02-05 16:26:02 -08001423 return 1;
1424
Jeff Garzikb4479162007-10-25 20:47:30 -04001425 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
Robert Hancock382a6652007-02-05 16:26:02 -08001426 (qc->tf.protocol == ATA_PROT_NODATA))
1427 return 0;
1428
1429 return 1;
1430}
1431
/* Prepare a command for issue.  Register-mode commands fall back to the
 * standard libata prep; ADMA commands get their CPB (command parameter
 * block) filled in.  The wmb()s enforce the order in which the
 * controller, which reads the CPB via DMA, may observe the updates:
 * the CPB is invalidated first and only marked valid after every other
 * field is written.
 */
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		/* DMA-mapped commands must not reach register mode unless
		   ATAPI setup is complete */
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	/* invalidate the CPB before touching its contents */
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len = 3;
	cpb->tag = qc->tag;
	cpb->next_cpb_idx = 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}
1477
/* Issue a prepared command.  Register-mode commands are handed to the
 * standard libata issue path; ADMA commands are kicked off by writing
 * the tag to the APPEND register.  Returns 0 or an AC_ERR_* code.
 */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			"NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
1523
/* Shared interrupt handler for the generic (non-ADMA, non-SWNCQ) flavor:
 * walk every enabled port and dispatch its active, non-polled command to
 * the standard libata interrupt handler.  Returns IRQ_HANDLED if any port
 * had work.
 */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
1556
Jeff Garzikcca39742006-08-24 03:19:22 -04001557static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
Tejun Heoada364e2006-06-17 15:49:56 +09001558{
1559 int i, handled = 0;
1560
Jeff Garzikcca39742006-08-24 03:19:22 -04001561 for (i = 0; i < host->n_ports; i++) {
1562 struct ata_port *ap = host->ports[i];
Tejun Heoada364e2006-06-17 15:49:56 +09001563
1564 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1565 handled += nv_host_intr(ap, irq_stat);
1566
1567 irq_stat >>= NV_INT_PORT_SHIFT;
1568 }
1569
1570 return IRQ_RETVAL(handled);
1571}
1572
David Howells7d12e782006-10-05 14:55:46 +01001573static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
Tejun Heoada364e2006-06-17 15:49:56 +09001574{
Jeff Garzikcca39742006-08-24 03:19:22 -04001575 struct ata_host *host = dev_instance;
Tejun Heoada364e2006-06-17 15:49:56 +09001576 u8 irq_stat;
1577 irqreturn_t ret;
1578
Jeff Garzikcca39742006-08-24 03:19:22 -04001579 spin_lock(&host->lock);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001580 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
Jeff Garzikcca39742006-08-24 03:19:22 -04001581 ret = nv_do_interrupt(host, irq_stat);
1582 spin_unlock(&host->lock);
Tejun Heoada364e2006-06-17 15:49:56 +09001583
1584 return ret;
1585}
1586
David Howells7d12e782006-10-05 14:55:46 +01001587static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
Tejun Heoada364e2006-06-17 15:49:56 +09001588{
Jeff Garzikcca39742006-08-24 03:19:22 -04001589 struct ata_host *host = dev_instance;
Tejun Heoada364e2006-06-17 15:49:56 +09001590 u8 irq_stat;
1591 irqreturn_t ret;
1592
Jeff Garzikcca39742006-08-24 03:19:22 -04001593 spin_lock(&host->lock);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001594 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
Jeff Garzikcca39742006-08-24 03:19:22 -04001595 ret = nv_do_interrupt(host, irq_stat);
1596 spin_unlock(&host->lock);
Tejun Heoada364e2006-06-17 15:49:56 +09001597
1598 return ret;
1599}
1600
Tejun Heoda3dbb12007-07-16 14:29:40 +09001601static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001604 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605
Tejun Heoda3dbb12007-07-16 14:29:40 +09001606 *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1607 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608}
1609
Tejun Heoda3dbb12007-07-16 14:29:40 +09001610static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001613 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614
Tejun Heo0d5ff562007-02-01 15:06:36 +09001615 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
Tejun Heoda3dbb12007-07-16 14:29:40 +09001616 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617}
1618
/* Freeze (mask interrupts for) this port on nForce2/3: clear the port's
 * bits in the shared NV_INT_ENABLE register.
 */
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
1629
/* Thaw (unmask interrupts for) this port on nForce2/3: ack any latched
 * status first, then re-enable the port's interrupt bits.
 */
static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	/* clear stale status before unmasking */
	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
1642
/* Freeze this port on CK804/MCP04: clear its bits in the MMIO interrupt
 * enable register.
 */
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
1653
/* Thaw this port on CK804/MCP04: ack latched status, then re-enable the
 * port's interrupt bits.
 */
static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	/* clear stale status before unmasking */
	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
1666
/* Freeze this port on MCP55: ack status, mask the port's interrupt bits
 * (32-bit registers, wider per-port shift), then run the generic BMDMA
 * freeze as well.
 */
static void nv_mcp55_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask &= ~(NV_INT_ALL_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
	ata_bmdma_freeze(ap);
}
1680
/* Thaw this port on MCP55: ack status, unmask the port's interrupt bits,
 * then run the generic BMDMA thaw as well.
 */
static void nv_mcp55_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask |= (NV_INT_MASK_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
	ata_bmdma_thaw(ap);
}
1694
/* Hardreset that deliberately discards the device class: *class is left
 * untouched and a throwaway variable is passed to the standard reset.
 */
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(link, &dummy, deadline);
}
1706
/* Standard BMDMA error handling, with nv_hardreset substituted so that
 * hardreset never classifies the device (see nv_hardreset above).
 */
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1712
/* ADMA error handler.  If the port is still in ADMA mode: dump the
 * engine state for diagnostics, drop back to register mode, invalidate
 * all CPBs, clear the fetch count and pulse the channel reset bit.
 * Finally run the standard BMDMA EH with the non-classifying hardreset.
 */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			/* commands were in flight -- log engine state */
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			/* log each CPB that belonged to an active command */
			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel: set then clear the reset bit, flushing the
		   posted write each time so the pulse actually reaches HW */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1769
Kuan Luof140f0f2007-10-15 15:16:53 -04001770static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1771{
1772 struct nv_swncq_port_priv *pp = ap->private_data;
1773 struct defer_queue *dq = &pp->defer_queue;
1774
1775 /* queue is full */
1776 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1777 dq->defer_bits |= (1 << qc->tag);
1778 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1779}
1780
1781static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1782{
1783 struct nv_swncq_port_priv *pp = ap->private_data;
1784 struct defer_queue *dq = &pp->defer_queue;
1785 unsigned int tag;
1786
1787 if (dq->head == dq->tail) /* null queue */
1788 return NULL;
1789
1790 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1791 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1792 WARN_ON(!(dq->defer_bits & (1 << tag)));
1793 dq->defer_bits &= ~(1 << tag);
1794
1795 return ata_qc_from_tag(ap, tag);
1796}
1797
1798static void nv_swncq_fis_reinit(struct ata_port *ap)
1799{
1800 struct nv_swncq_port_priv *pp = ap->private_data;
1801
1802 pp->dhfis_bits = 0;
1803 pp->dmafis_bits = 0;
1804 pp->sdbfis_bits = 0;
1805 pp->ncq_flags = 0;
1806}
1807
1808static void nv_swncq_pp_reinit(struct ata_port *ap)
1809{
1810 struct nv_swncq_port_priv *pp = ap->private_data;
1811 struct defer_queue *dq = &pp->defer_queue;
1812
1813 dq->head = 0;
1814 dq->tail = 0;
1815 dq->defer_bits = 0;
1816 pp->qc_active = 0;
1817 pp->last_issue_tag = ATA_TAG_POISON;
1818 nv_swncq_fis_reinit(ap);
1819}
1820
/* Acknowledge the given FIS interrupt bits in this port's slice of the
 * MCP55 interrupt status register.
 */
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}
1827
/* Stop BMDMA on @ap.  ata_bmdma_stop() takes a qc only to reach the
 * port, so a throwaway stack qc with just ->ap filled in suffices.
 */
static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}
1835
1836static void nv_swncq_ncq_stop(struct ata_port *ap)
1837{
1838 struct nv_swncq_port_priv *pp = ap->private_data;
1839 unsigned int i;
1840 u32 sactive;
1841 u32 done_mask;
1842
1843 ata_port_printk(ap, KERN_ERR,
1844 "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1845 ap->qc_active, ap->link.sactive);
1846 ata_port_printk(ap, KERN_ERR,
1847 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1848 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1849 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1850 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1851
1852 ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1853 ap->ops->check_status(ap),
1854 ioread8(ap->ioaddr.error_addr));
1855
1856 sactive = readl(pp->sactive_block);
1857 done_mask = pp->qc_active ^ sactive;
1858
1859 ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
1860 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1861 u8 err = 0;
1862 if (pp->qc_active & (1 << i))
1863 err = 0;
1864 else if (done_mask & (1 << i))
1865 err = 1;
1866 else
1867 continue;
1868
1869 ata_port_printk(ap, KERN_ERR,
1870 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1871 (pp->dhfis_bits >> i) & 0x1,
1872 (pp->dmafis_bits >> i) & 0x1,
1873 (pp->sdbfis_bits >> i) & 0x1,
1874 (sactive >> i) & 0x1,
1875 (err ? "error! tag doesn't exit" : " "));
1876 }
1877
1878 nv_swncq_pp_reinit(ap);
1879 ap->ops->irq_clear(ap);
1880 __ata_bmdma_stop(ap);
1881 nv_swncq_irq_clear(ap, 0xffff);
1882}
1883
/* SWNCQ error handler: if NCQ commands are outstanding, stop NCQ
 * operation (dumping state) and force a reset, then run the standard
 * BMDMA EH with the non-classifying hardreset.
 */
static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_RESET;
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1896
1897#ifdef CONFIG_PM
/* Suspend hook: quiesce the port by acking and masking all interrupts
 * and disabling the SWNCQ mode bits.  Always returns 0.
 */
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}
1916
/* Resume hook: mirror of nv_swncq_port_suspend -- ack stale interrupts,
 * re-enable the interrupt mask and the SWNCQ mode bits.  Always returns 0.
 */
static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq (same mask as nv_swncq_host_init) */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
1934#endif
1935
/* One-time host setup for SWNCQ: disable the ECO 398 workaround bit in
 * PCI config space, turn on SWNCQ for both ports, unmask interrupts and
 * clear any stale status.
 */
static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
1961
/* SCSI slave_config hook: after the standard libata config, disable NCQ
 * (queue depth 1) for Maxtor drives on MCP51, and on MCP55 revisions
 * <= 0xa2, where the combination is known to misbehave.
 */
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_printk(dev, KERN_NOTICE,
			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
	}

	return rc;
}
2007
/* Per-port init for SWNCQ: run the generic port start, allocate the
 * per-port private struct plus one PRD table per possible tag, and cache
 * the MMIO addresses of the SActive, IRQ-status and tag registers.
 * Returns 0 or -ENOMEM (devm/dmam-managed, so no explicit cleanup).
 */
static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* one PRD table per tag */
	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;
	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}
2036
2037static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
2038{
2039 if (qc->tf.protocol != ATA_PROT_NCQ) {
2040 ata_qc_prep(qc);
2041 return;
2042 }
2043
2044 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2045 return;
2046
2047 nv_swncq_fill_sg(qc);
2048}
2049
/* Build the BMDMA PRD table for an NCQ command in this tag's slot of the
 * per-port PRD area.  Each scatterlist element is split so no PRD entry
 * crosses a 64KB boundary; the last entry is flagged with ATA_PRD_EOT.
 */
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_prd *prd;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			/* clamp each PRD entry at the next 64KB boundary */
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
2085
/* Issue one NCQ command to the drive: mark its tag in the SActive
 * register and the driver bookkeeping, then load and execute the
 * taskfile.  A NULL @qc is a no-op.  Always returns 0.
 */
static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	DPRINTK("Enter\n");

	/* set SActive before issuing, clear FIS-tracking bits for the tag */
	writel((1 << qc->tag), pp->sactive_block);
	pp->last_issue_tag = qc->tag;
	pp->dhfis_bits &= ~(1 << qc->tag);
	pp->dmafis_bits &= ~(1 << qc->tag);
	pp->qc_active |= (0x1 << qc->tag);

	ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
	ap->ops->exec_command(ap, &qc->tf);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
2109
2110static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2111{
2112 struct ata_port *ap = qc->ap;
2113 struct nv_swncq_port_priv *pp = ap->private_data;
2114
2115 if (qc->tf.protocol != ATA_PROT_NCQ)
2116 return ata_qc_issue_prot(qc);
2117
2118 DPRINTK("Enter\n");
2119
2120 if (!pp->qc_active)
2121 nv_swncq_issue_atacmd(ap, qc);
2122 else
2123 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2124
2125 return 0;
2126}
2127
/* Handle a hotplug/unplug notification: clear SError, record what
 * happened in the EH info, and freeze the port so EH takes over.
 */
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}
2152
/* Process a Set Device Bits FIS: complete every command whose SActive
 * bit the device cleared, then decide what to issue next (re-issue the
 * last command if its D2H register FIS never arrived, or pull the next
 * command off the defer queue).
 *
 * Returns the number of commands completed, or -EINVAL after flagging
 * EH for a BMDMA error or an illegal qc_active/SActive transition.
 */
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	int nr_done = 0;
	u32 done_mask;
	int i;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->irq_clear(ap);
	__ata_bmdma_stop(ap);

	/* bits set in qc_active but clear in SActive are now complete */
	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	if (unlikely(done_mask & sactive)) {
		/* a tag became active that we never issued */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
				  "(%08x->%08x)", pp->qc_active, sactive);
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		if (!(done_mask & (1 << i)))
			continue;

		qc = ata_qc_from_tag(ap, i);
		if (qc) {
			ata_qc_complete(qc);
			pp->qc_active &= ~(1 << i);
			pp->dhfis_bits &= ~(1 << i);
			pp->dmafis_bits &= ~(1 << i);
			pp->sdbfis_bits |= (1 << i);
			nr_done++;
		}
	}

	if (!ap->qc_active) {
		/* everything finished; reset per-port state */
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return nr_done;
	}

	if (pp->qc_active & pp->dhfis_bits)
		return nr_done;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a device-to-host register FIS,
		 * the driver needs to reissue the command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x,"
		"SWNCQ:qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return nr_done;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return nr_done;
}
2244
2245static inline u32 nv_swncq_tag(struct ata_port *ap)
2246{
2247 struct nv_swncq_port_priv *pp = ap->private_data;
2248 u32 tag;
2249
2250 tag = readb(pp->tag_block) >> 2;
2251 return (tag & 0x1f);
2252}
2253
2254static int nv_swncq_dmafis(struct ata_port *ap)
2255{
2256 struct ata_queued_cmd *qc;
2257 unsigned int rw;
2258 u8 dmactl;
2259 u32 tag;
2260 struct nv_swncq_port_priv *pp = ap->private_data;
2261
2262 __ata_bmdma_stop(ap);
2263 tag = nv_swncq_tag(ap);
2264
2265 DPRINTK("dma setup tag 0x%x\n", tag);
2266 qc = ata_qc_from_tag(ap, tag);
2267
2268 if (unlikely(!qc))
2269 return 0;
2270
2271 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2272
2273 /* load PRD table addr. */
2274 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2275 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2276
2277 /* specify data direction, triple-check start bit is clear */
2278 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2279 dmactl &= ~ATA_DMA_WR;
2280 if (!rw)
2281 dmactl |= ATA_DMA_WR;
2282
2283 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2284
2285 return 1;
2286}
2287
/* Per-port interrupt handler for SWNCQ mode.
 *
 * @ap:  port that raised the interrupt
 * @fis: this port's slice of the MCP55 interrupt status register
 *
 * Decodes the per-port status bits (hotplug, backout, SDB FIS,
 * D2H register FIS, DMA setup FIS), completes finished NCQ commands,
 * issues deferred ones, and freezes the port on protocol errors.
 * Called with the host lock held.
 */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;
	int rc = 0;

	/* sample ATA status before acking the port's interrupt bits */
	ata_stat = ap->ops->check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	/* frozen port: EH owns it, don't touch anything */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	/* nothing in flight, nothing more to process */
	if (!pp->qc_active)
		return;

	/* read and write back SError to clear it; bail on SCR failure */
	if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(ap, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		/* device-reported error: record it and let EH reset */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		/* Set Device Bits FIS: one or more commands finished;
		 * nv_swncq_sdbfis() completes them and may reissue */
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		rc = nv_swncq_sdbfis(ap);
		if (rc < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			/* a D2H register FIS after SDB/backout is not a
			 * legal transition while NCQ commands are queued */
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		/* no DMA setup seen yet for this queue: if the drive has
		 * gone non-busy, push the next deferred command */
		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		rc = nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
2388
2389static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2390{
2391 struct ata_host *host = dev_instance;
2392 unsigned int i;
2393 unsigned int handled = 0;
2394 unsigned long flags;
2395 u32 irq_stat;
2396
2397 spin_lock_irqsave(&host->lock, flags);
2398
2399 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2400
2401 for (i = 0; i < host->n_ports; i++) {
2402 struct ata_port *ap = host->ports[i];
2403
2404 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2405 if (ap->link.sactive) {
2406 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2407 handled = 1;
2408 } else {
2409 if (irq_stat) /* reserve Hotplug */
2410 nv_swncq_irq_clear(ap, 0xfff0);
2411
2412 handled += nv_host_intr(ap, (u8)irq_stat);
2413 }
2414 }
2415 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2416 }
2417
2418 spin_unlock_irqrestore(&host->lock, flags);
2419
2420 return IRQ_RETVAL(handled);
2421}
2422
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002423static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424{
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002425 static int printed_version;
Tejun Heo1626aeb2007-05-04 12:43:58 +02002426 const struct ata_port_info *ppi[] = { NULL, NULL };
Tejun Heo9a829cc2007-04-17 23:44:08 +09002427 struct ata_host *host;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002428 struct nv_host_priv *hpriv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 int rc;
2430 u32 bar;
Tejun Heo0d5ff562007-02-01 15:06:36 +09002431 void __iomem *base;
Robert Hancockfbbb2622006-10-27 19:08:41 -07002432 unsigned long type = ent->driver_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433
2434 // Make sure this is a SATA controller by counting the number of bars
2435 // (NVIDIA SATA controllers will always have six bars). Otherwise,
2436 // it's an IDE controller and we ignore it.
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002437 for (bar = 0; bar < 6; bar++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438 if (pci_resource_start(pdev, bar) == 0)
2439 return -ENODEV;
2440
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002441 if (!printed_version++)
Jeff Garzika9524a72005-10-30 14:39:11 -05002442 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443
Tejun Heo24dc5f32007-01-20 16:00:28 +09002444 rc = pcim_enable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002446 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447
Tejun Heo9a829cc2007-04-17 23:44:08 +09002448 /* determine type and allocate host */
Kuan Luof140f0f2007-10-15 15:16:53 -04002449 if (type == CK804 && adma_enabled) {
Robert Hancockfbbb2622006-10-27 19:08:41 -07002450 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2451 type = ADMA;
Robert Hancockfbbb2622006-10-27 19:08:41 -07002452 }
2453
Jeff Garzik360737a2007-10-29 06:49:24 -04002454 if (type == SWNCQ) {
2455 if (swncq_enabled)
2456 dev_printk(KERN_NOTICE, &pdev->dev,
2457 "Using SWNCQ mode\n");
2458 else
2459 type = GENERIC;
2460 }
2461
Tejun Heo1626aeb2007-05-04 12:43:58 +02002462 ppi[0] = &nv_port_info[type];
Tejun Heod583bc12007-07-04 18:02:07 +09002463 rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
Tejun Heo9a829cc2007-04-17 23:44:08 +09002464 if (rc)
2465 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466
Tejun Heo24dc5f32007-01-20 16:00:28 +09002467 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002468 if (!hpriv)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002469 return -ENOMEM;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002470 hpriv->type = type;
Tejun Heo9a829cc2007-04-17 23:44:08 +09002471 host->private_data = hpriv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472
Tejun Heo9a829cc2007-04-17 23:44:08 +09002473 /* request and iomap NV_MMIO_BAR */
2474 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2475 if (rc)
2476 return rc;
2477
2478 /* configure SCR access */
2479 base = host->iomap[NV_MMIO_BAR];
2480 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2481 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
Jeff Garzik02cbd922006-03-22 23:59:46 -05002482
Tejun Heoada364e2006-06-17 15:49:56 +09002483 /* enable SATA space for CK804 */
Robert Hancockfbbb2622006-10-27 19:08:41 -07002484 if (type >= CK804) {
Tejun Heoada364e2006-06-17 15:49:56 +09002485 u8 regval;
2486
2487 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2488 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2489 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2490 }
2491
Tejun Heo9a829cc2007-04-17 23:44:08 +09002492 /* init ADMA */
Robert Hancockfbbb2622006-10-27 19:08:41 -07002493 if (type == ADMA) {
Tejun Heo9a829cc2007-04-17 23:44:08 +09002494 rc = nv_adma_host_init(host);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002495 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002496 return rc;
Jeff Garzik360737a2007-10-29 06:49:24 -04002497 } else if (type == SWNCQ)
Kuan Luof140f0f2007-10-15 15:16:53 -04002498 nv_swncq_host_init(host);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002499
Tejun Heo9a829cc2007-04-17 23:44:08 +09002500 pci_set_master(pdev);
2501 return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
2502 IRQF_SHARED, ppi[0]->sht);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503}
2504
Tejun Heo438ac6d2007-03-02 17:31:26 +09002505#ifdef CONFIG_PM
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002506static int nv_pci_device_resume(struct pci_dev *pdev)
2507{
2508 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2509 struct nv_host_priv *hpriv = host->private_data;
Robert Hancockce053fa2007-02-05 16:26:04 -08002510 int rc;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002511
Robert Hancockce053fa2007-02-05 16:26:04 -08002512 rc = ata_pci_device_do_resume(pdev);
Jeff Garzikb4479162007-10-25 20:47:30 -04002513 if (rc)
Robert Hancockce053fa2007-02-05 16:26:04 -08002514 return rc;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002515
2516 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
Jeff Garzikb4479162007-10-25 20:47:30 -04002517 if (hpriv->type >= CK804) {
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002518 u8 regval;
2519
2520 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2521 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2522 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2523 }
Jeff Garzikb4479162007-10-25 20:47:30 -04002524 if (hpriv->type == ADMA) {
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002525 u32 tmp32;
2526 struct nv_adma_port_priv *pp;
2527 /* enable/disable ADMA on the ports appropriately */
2528 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2529
2530 pp = host->ports[0]->private_data;
Jeff Garzikb4479162007-10-25 20:47:30 -04002531 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002532 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002533 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002534 else
2535 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002536 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002537 pp = host->ports[1]->private_data;
Jeff Garzikb4479162007-10-25 20:47:30 -04002538 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002539 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002540 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002541 else
2542 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002543 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002544
2545 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2546 }
2547 }
2548
2549 ata_host_resume(host);
2550
2551 return 0;
2552}
Tejun Heo438ac6d2007-03-02 17:31:26 +09002553#endif
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002554
Jeff Garzikcca39742006-08-24 03:19:22 -04002555static void nv_ck804_host_stop(struct ata_host *host)
Tejun Heoada364e2006-06-17 15:49:56 +09002556{
Jeff Garzikcca39742006-08-24 03:19:22 -04002557 struct pci_dev *pdev = to_pci_dev(host->dev);
Tejun Heoada364e2006-06-17 15:49:56 +09002558 u8 regval;
2559
2560 /* disable SATA space for CK804 */
2561 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2562 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2563 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
Tejun Heoada364e2006-06-17 15:49:56 +09002564}
2565
Robert Hancockfbbb2622006-10-27 19:08:41 -07002566static void nv_adma_host_stop(struct ata_host *host)
2567{
2568 struct pci_dev *pdev = to_pci_dev(host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002569 u32 tmp32;
2570
Robert Hancockfbbb2622006-10-27 19:08:41 -07002571 /* disable ADMA on the ports */
2572 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2573 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2574 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2575 NV_MCP_SATA_CFG_20_PORT1_EN |
2576 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2577
2578 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2579
2580 nv_ck804_host_stop(host);
2581}
2582
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583static int __init nv_init(void)
2584{
Pavel Roskinb7887192006-08-10 18:13:18 +09002585 return pci_register_driver(&nv_pci_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586}
2587
/* module exit point: unregister the PCI driver */
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
2592
/* module wiring and load-time tunables (readable via sysfs, 0444) */
module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");
2599