blob: b3b3da4eaa0321b140440913e7e3f8157e75fae1 [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
23
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is quite often not
  worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/
63
64
Brett Russ20f733e2005-09-01 18:26:17 -040065#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080072#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050074#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050075#include <linux/platform_device.h>
76#include <linux/ata_platform.h>
Brett Russ20f733e2005-09-01 18:26:17 -040077#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050078#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040079#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040081
/* Driver identity, reported to userspace and used in log messages */
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040084
85enum {
86 /* BAR's are enumerated in terms of pci_resource_start() terms */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
90
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
93
94 MV_PCI_REG_BASE = 0,
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040096 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
101
Brett Russ20f733e2005-09-01 18:26:17 -0400102 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500103 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400106
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
111
Brett Russ31961942005-09-30 01:36:00 -0400112 MV_MAX_Q_DEPTH = 32,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
114
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
118 */
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500121 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400123
Brett Russ20f733e2005-09-01 18:26:17 -0400124 MV_PORTS_PER_HC = 4,
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400128 MV_PORT_MASK = 3,
129
130 /* Host Flags */
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC = (1 << 28),
135
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400140
Brett Russ31961942005-09-30 01:36:00 -0400141 CRQB_FLAG_READ = (1 << 0),
142 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400145 CRQB_CMD_ADDR_SHIFT = 8,
146 CRQB_CMD_CS = (0x2 << 11),
147 CRQB_CMD_LAST = (1 << 15),
148
149 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400150 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
151 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400152
153 EPRD_FLAG_END_OF_TBL = (1 << 31),
154
Brett Russ20f733e2005-09-01 18:26:17 -0400155 /* PCI interface registers */
156
Brett Russ31961942005-09-30 01:36:00 -0400157 PCI_COMMAND_OFS = 0xc00,
158
Brett Russ20f733e2005-09-01 18:26:17 -0400159 PCI_MAIN_CMD_STS_OFS = 0xd30,
160 STOP_PCI_MASTER = (1 << 2),
161 PCI_MASTER_EMPTY = (1 << 3),
162 GLOB_SFT_RST = (1 << 4),
163
Jeff Garzik522479f2005-11-12 22:14:02 -0500164 MV_PCI_MODE = 0xd00,
165 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
166 MV_PCI_DISC_TIMER = 0xd04,
167 MV_PCI_MSI_TRIGGER = 0xc38,
168 MV_PCI_SERR_MASK = 0xc28,
169 MV_PCI_XBAR_TMOUT = 0x1d04,
170 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
171 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
172 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
173 MV_PCI_ERR_COMMAND = 0x1d50,
174
Mark Lord02a121d2007-12-01 13:07:22 -0500175 PCI_IRQ_CAUSE_OFS = 0x1d58,
176 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400177 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
178
Mark Lord02a121d2007-12-01 13:07:22 -0500179 PCIE_IRQ_CAUSE_OFS = 0x1900,
180 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500181 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500182
Brett Russ20f733e2005-09-01 18:26:17 -0400183 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
184 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500185 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
186 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
Brett Russ20f733e2005-09-01 18:26:17 -0400187 PORT0_ERR = (1 << 0), /* shift by port # */
188 PORT0_DONE = (1 << 1), /* shift by port # */
189 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
190 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
191 PCI_ERR = (1 << 18),
192 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
193 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500194 PORTS_0_3_COAL_DONE = (1 << 8),
195 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400196 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
197 GPIO_INT = (1 << 22),
198 SELF_INT = (1 << 23),
199 TWSI_INT = (1 << 24),
200 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500201 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500202 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500203 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400204 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
205 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500206 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
207 HC_MAIN_RSVD_5),
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500208 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
Brett Russ20f733e2005-09-01 18:26:17 -0400209
210 /* SATAHC registers */
211 HC_CFG_OFS = 0,
212
213 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400214 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400215 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
216 DEV_IRQ = (1 << 8), /* shift by port # */
217
218 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400219 SHD_BLK_OFS = 0x100,
220 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400221
222 /* SATA registers */
223 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
224 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500225 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500226 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500227 PHY_MODE4 = 0x314,
228 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500229 MV5_PHY_MODE = 0x74,
230 MV5_LT_MODE = 0x30,
231 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500232 SATA_INTERFACE_CTL = 0x050,
233
234 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400235
236 /* Port registers */
237 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500238 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
239 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
240 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
241 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
242 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400243
244 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
245 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400246 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
247 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
248 EDMA_ERR_DEV = (1 << 2), /* device error */
249 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
250 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
251 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400252 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
253 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400254 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400255 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400256 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
257 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
258 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
259 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500260
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400261 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500262 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
263 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
264 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
265 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
266
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400267 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500268
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400269 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500270 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
273 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
274 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
275
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400276 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500277
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400278 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400279 EDMA_ERR_OVERRUN_5 = (1 << 5),
280 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500281
282 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
283 EDMA_ERR_LNK_CTRL_RX_1 |
284 EDMA_ERR_LNK_CTRL_RX_3 |
285 EDMA_ERR_LNK_CTRL_TX,
286
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400287 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
288 EDMA_ERR_PRD_PAR |
289 EDMA_ERR_DEV_DCON |
290 EDMA_ERR_DEV_CON |
291 EDMA_ERR_SERR |
292 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400293 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400294 EDMA_ERR_CRPB_PAR |
295 EDMA_ERR_INTRL_PAR |
296 EDMA_ERR_IORDY |
297 EDMA_ERR_LNK_CTRL_RX_2 |
298 EDMA_ERR_LNK_DATA_RX |
299 EDMA_ERR_LNK_DATA_TX |
300 EDMA_ERR_TRANS_PROTO,
301 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
302 EDMA_ERR_PRD_PAR |
303 EDMA_ERR_DEV_DCON |
304 EDMA_ERR_DEV_CON |
305 EDMA_ERR_OVERRUN_5 |
306 EDMA_ERR_UNDERRUN_5 |
307 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400308 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400309 EDMA_ERR_CRPB_PAR |
310 EDMA_ERR_INTRL_PAR |
311 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400312
Brett Russ31961942005-09-30 01:36:00 -0400313 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
314 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400315
316 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
317 EDMA_REQ_Q_PTR_SHIFT = 5,
318
319 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
320 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
321 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400322 EDMA_RSP_Q_PTR_SHIFT = 3,
323
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400324 EDMA_CMD_OFS = 0x28, /* EDMA command register */
325 EDMA_EN = (1 << 0), /* enable EDMA */
326 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
327 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400328
Jeff Garzikc9d39132005-11-13 17:47:51 -0500329 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500330 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500331
Brett Russ31961942005-09-30 01:36:00 -0400332 /* Host private flags (hp_flags) */
333 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500334 MV_HP_ERRATA_50XXB0 = (1 << 1),
335 MV_HP_ERRATA_50XXB2 = (1 << 2),
336 MV_HP_ERRATA_60X1B2 = (1 << 3),
337 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500338 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400339 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
340 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
341 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500342 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400343
Brett Russ31961942005-09-30 01:36:00 -0400344 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400345 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500346 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400347 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400348};
349
/* Controller-generation tests on mv_host_priv::hp_flags */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
/* SoC variants have no PCI interface (MV_FLAG_SOC set on port 0) */
#define HAS_PCI(host)		(!((host)->ports[0]->flags & MV_FLAG_SOC))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
369
/* Index into mv_port_info[] / mv_hw_ops selection, one per chip family */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};
380
Brett Russ31961942005-09-30 01:36:00 -0400381/* Command ReQuest Block: 32B */
382struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400383 __le32 sg_addr;
384 __le32 sg_addr_hi;
385 __le16 ctrl_flags;
386 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400387};
388
Jeff Garzike4e7b892006-01-31 12:18:41 -0500389struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400390 __le32 addr;
391 __le32 addr_hi;
392 __le32 flags;
393 __le32 len;
394 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500395};
396
Brett Russ31961942005-09-30 01:36:00 -0400397/* Command ResPonse Block: 8B */
398struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400399 __le16 id;
400 __le16 flags;
401 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400402};
403
404/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
405struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400406 __le32 addr;
407 __le32 flags_size;
408 __le32 addr_hi;
409 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400410};
411
412struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400413 struct mv_crqb *crqb;
414 dma_addr_t crqb_dma;
415 struct mv_crpb *crpb;
416 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500417 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
418 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400419
420 unsigned int req_idx;
421 unsigned int resp_idx;
422
Brett Russ31961942005-09-30 01:36:00 -0400423 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400424};
425
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500426struct mv_port_signal {
427 u32 amps;
428 u32 pre;
429};
430
Mark Lord02a121d2007-12-01 13:07:22 -0500431struct mv_host_priv {
432 u32 hp_flags;
433 struct mv_port_signal signal[8];
434 const struct mv_hw_ops *ops;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500435 int n_ports;
436 void __iomem *base;
437 void __iomem *main_cause_reg_addr;
438 void __iomem *main_mask_reg_addr;
Mark Lord02a121d2007-12-01 13:07:22 -0500439 u32 irq_cause_ofs;
440 u32 irq_mask_ofs;
441 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500442 /*
443 * These consistent DMA memory pools give us guaranteed
444 * alignment for hardware-accessed data structures,
445 * and less memory waste in accomplishing the alignment.
446 */
447 struct dma_pool *crqb_pool;
448 struct dma_pool *crpb_pool;
449 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500450};
451
Jeff Garzik47c2b672005-11-12 21:13:17 -0500452struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500453 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
454 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500455 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
456 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
457 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500458 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
459 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500460 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100461 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500462};
463
Tejun Heoda3dbb12007-07-16 14:29:40 +0900464static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
465static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
466static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
467static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400468static int mv_port_start(struct ata_port *ap);
469static void mv_port_stop(struct ata_port *ap);
470static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500471static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900472static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400473static void mv_error_handler(struct ata_port *ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400474static void mv_eh_freeze(struct ata_port *ap);
475static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500476static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400477
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500478static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
479 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500480static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
481static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
482 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500483static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
484 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500485static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100486static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500487
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500488static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
489 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500490static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
491static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
492 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500493static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
494 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500495static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500496static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
497 void __iomem *mmio);
498static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
499 void __iomem *mmio);
500static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
501 void __iomem *mmio, unsigned int n_hc);
502static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
503 void __iomem *mmio);
504static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100505static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500506static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
507 unsigned int port_no);
Mark Lord72109162008-01-26 18:31:33 -0500508static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
509 void __iomem *port_mmio, int want_ncq);
510static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500511
Mark Lordeb73d552008-01-29 13:24:00 -0500512/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
513 * because we have to allow room for worst case splitting of
514 * PRDs for 64K boundaries in mv_fill_sg().
515 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400516static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400517 .module = THIS_MODULE,
518 .name = DRV_NAME,
519 .ioctl = ata_scsi_ioctl,
520 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400521 .can_queue = ATA_DEF_QUEUE,
522 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400523 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400524 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
525 .emulated = ATA_SHT_EMULATED,
526 .use_clustering = 1,
527 .proc_name = DRV_NAME,
528 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400529 .slave_configure = ata_scsi_slave_config,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400530 .slave_destroy = ata_scsi_slave_destroy,
531 .bios_param = ata_std_bios_param,
532};
533
534static struct scsi_host_template mv6_sht = {
535 .module = THIS_MODULE,
536 .name = DRV_NAME,
537 .ioctl = ata_scsi_ioctl,
538 .queuecommand = ata_scsi_queuecmd,
Mark Lord138bfdd2008-01-26 18:33:18 -0500539 .change_queue_depth = ata_scsi_change_queue_depth,
540 .can_queue = MV_MAX_Q_DEPTH - 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400541 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400542 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400543 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
544 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500545 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400546 .proc_name = DRV_NAME,
547 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400548 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900549 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400550 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400551};
552
Jeff Garzikc9d39132005-11-13 17:47:51 -0500553static const struct ata_port_operations mv5_ops = {
Jeff Garzikc9d39132005-11-13 17:47:51 -0500554 .tf_load = ata_tf_load,
555 .tf_read = ata_tf_read,
556 .check_status = ata_check_status,
557 .exec_command = ata_exec_command,
558 .dev_select = ata_std_dev_select,
559
Jeff Garzikcffacd82007-03-09 09:46:47 -0500560 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500561
562 .qc_prep = mv_qc_prep,
563 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900564 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500565
Tejun Heo358f9a72008-03-25 12:22:47 +0900566 .irq_clear = ata_noop_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900567 .irq_on = ata_irq_on,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500568
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400569 .error_handler = mv_error_handler,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400570 .freeze = mv_eh_freeze,
571 .thaw = mv_eh_thaw,
572
Jeff Garzikc9d39132005-11-13 17:47:51 -0500573 .scr_read = mv5_scr_read,
574 .scr_write = mv5_scr_write,
575
576 .port_start = mv_port_start,
577 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500578};
579
580static const struct ata_port_operations mv6_ops = {
Mark Lordf2738272008-01-26 18:32:29 -0500581 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400582 .tf_load = ata_tf_load,
583 .tf_read = ata_tf_read,
584 .check_status = ata_check_status,
585 .exec_command = ata_exec_command,
586 .dev_select = ata_std_dev_select,
587
Jeff Garzikcffacd82007-03-09 09:46:47 -0500588 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400589
Brett Russ31961942005-09-30 01:36:00 -0400590 .qc_prep = mv_qc_prep,
591 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900592 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400593
Tejun Heo358f9a72008-03-25 12:22:47 +0900594 .irq_clear = ata_noop_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900595 .irq_on = ata_irq_on,
Brett Russ20f733e2005-09-01 18:26:17 -0400596
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400597 .error_handler = mv_error_handler,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400598 .freeze = mv_eh_freeze,
599 .thaw = mv_eh_thaw,
Mark Lord138bfdd2008-01-26 18:33:18 -0500600 .qc_defer = ata_std_qc_defer,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400601
Brett Russ20f733e2005-09-01 18:26:17 -0400602 .scr_read = mv_scr_read,
603 .scr_write = mv_scr_write,
604
Brett Russ31961942005-09-30 01:36:00 -0400605 .port_start = mv_port_start,
606 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400607};
608
/*
 * Port operations for Gen-IIE chips; selected by chip_6042/chip_7042/
 * chip_soc in mv_port_info[] below.  Differs from the Gen-II ops mainly
 * in using the IIE-specific command request block prep (mv_qc_prep_iie).
 */
static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= ata_noop_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
636
/*
 * Per-chip-family configuration, indexed by the chip_* board ids that
 * mv_pci_tbl[] stores as driver_data.  All families support PIO0-4 and
 * up to UDMA6; they differ in flags (NCQ, dual host controller, SoC)
 * and in which ata_port_operations table they use.
 */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
691
/*
 * PCI IDs handled by this driver.  driver_data is the chip_* index
 * into mv_port_info[] above.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
721
/* Low-level hardware hooks for Gen-I (50xx) chips. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
730
/* Low-level hardware hooks for Gen-II/IIE PCI chips (60xx/70xx). */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
739
/*
 * Low-level hardware hooks for system-on-chip variants: shares the
 * Gen-II PHY errata handling but uses SoC-specific reset/LED/preamp
 * routines (no PCI bus to reset).
 */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
748
/*
 * Functions
 */

/*
 * writelfl - register write followed by a read-back of the same
 * register, forcing the write past any PCI posted-write buffering
 * before the caller proceeds.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
758
Brett Russ20f733e2005-09-01 18:26:17 -0400759static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
760{
761 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
762}
763
Jeff Garzikc9d39132005-11-13 17:47:51 -0500764static inline unsigned int mv_hc_from_port(unsigned int port)
765{
766 return port >> MV_PORT_HC_SHIFT;
767}
768
769static inline unsigned int mv_hardport_from_port(unsigned int port)
770{
771 return port & MV_PORT_MASK;
772}
773
774static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
775 unsigned int port)
776{
777 return mv_hc_base(base, mv_hc_from_port(port));
778}
779
Brett Russ20f733e2005-09-01 18:26:17 -0400780static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
781{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500782 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500783 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500784 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400785}
786
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500787static inline void __iomem *mv_host_base(struct ata_host *host)
788{
789 struct mv_host_priv *hpriv = host->private_data;
790 return hpriv->base;
791}
792
Brett Russ20f733e2005-09-01 18:26:17 -0400793static inline void __iomem *mv_ap_base(struct ata_port *ap)
794{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500795 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400796}
797
Jeff Garzikcca39742006-08-24 03:19:22 -0400798static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400799{
Jeff Garzikcca39742006-08-24 03:19:22 -0400800 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400801}
802
/**
 * mv_set_edma_ptrs - Load EDMA request/response queue pointer registers
 * @port_mmio: per-port register base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding queue DMA addresses and soft indices
 *
 * Program the hardware's queue base-address and in/out pointer
 * registers from the driver's cached state.  On chips with the
 * XX42A0 erratum, the request-out and response-in pointer registers
 * also carry the low base-address bits and must have them merged in.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);	/* request ring must be 1KB aligned */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);	/* response ring must be 256B aligned */
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
842
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: per-port register base
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * If the engine is running in the wrong mode (NCQ vs. non-NCQ),
 * stop it first.  If it is then not running: clear stale EDMA
 * error/interrupt indications, reconfigure the engine for the
 * requested mode, reload the queue pointers, and enable it.
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* restart the engine only if the queuing mode must change */
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		/*
		 * NOTE(review): the HC base is looked up with hard_port
		 * (0..MV_PORT_MASK) rather than ap->port_no; on dual-HC
		 * boards that always selects HC 0 — verify intent.
		 */
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
895
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Request eDMA disable (the disable bit self-clears) and then poll,
 * up to 1000 * 100us = 100ms, for the enable bit to drop.
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller (must hold the host lock; see mv_stop_dma).
 *
 * RETURNS:
 * 0 on success, -EIO if the engine refused to stop.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* soft state says "already stopped" -- hardware must agree */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
938
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400939static int mv_stop_dma(struct ata_port *ap)
940{
941 unsigned long flags;
942 int rc;
943
944 spin_lock_irqsave(&ap->host->lock, flags);
945 rc = __mv_stop_dma(ap);
946 spin_unlock_irqrestore(&ap->host->lock, flags);
947
948 return rc;
949}
950
#ifdef ATA_DEBUG
/* Hex-dump @bytes bytes of MMIO space at @start, four 32-bit words per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
965
/*
 * Hex-dump @bytes bytes of @pdev's PCI config space, four dwords per
 * line.  Compiles to an empty function unless ATA_DEBUG is defined.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			/* read errors are ignored; dw is dumped as-is */
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * Debug dump of PCI config space plus bridge, HC, EDMA and SATA
 * register blocks.  @port < 0 means "all ports/HCs"; otherwise only
 * the given port and its HC are dumped.  No-op unless ATA_DEBUG.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
1026
Brett Russ20f733e2005-09-01 18:26:17 -04001027static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1028{
1029 unsigned int ofs;
1030
1031 switch (sc_reg_in) {
1032 case SCR_STATUS:
1033 case SCR_CONTROL:
1034 case SCR_ERROR:
1035 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1036 break;
1037 case SCR_ACTIVE:
1038 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1039 break;
1040 default:
1041 ofs = 0xffffffffU;
1042 break;
1043 }
1044 return ofs;
1045}
1046
Tejun Heoda3dbb12007-07-16 14:29:40 +09001047static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001048{
1049 unsigned int ofs = mv_scr_offset(sc_reg_in);
1050
Tejun Heoda3dbb12007-07-16 14:29:40 +09001051 if (ofs != 0xffffffffU) {
1052 *val = readl(mv_ap_base(ap) + ofs);
1053 return 0;
1054 } else
1055 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001056}
1057
Tejun Heoda3dbb12007-07-16 14:29:40 +09001058static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001059{
1060 unsigned int ofs = mv_scr_offset(sc_reg_in);
1061
Tejun Heoda3dbb12007-07-16 14:29:40 +09001062 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001063 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001064 return 0;
1065 } else
1066 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001067}
1068
Mark Lordf2738272008-01-26 18:32:29 -05001069static void mv6_dev_config(struct ata_device *adev)
1070{
1071 /*
1072 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1073 * See mv_qc_prep() for more info.
1074 */
1075 if (adev->flags & ATA_DFLAG_NCQ)
1076 if (adev->max_sectors > ATA_MAX_SECTORS)
1077 adev->max_sectors = ATA_MAX_SECTORS;
1078}
1079
/**
 * mv_edma_cfg - Program the EDMA configuration register
 * @pp: port private data (NCQ soft-state flag is updated here)
 * @hpriv: host private data (for chip generation checks)
 * @port_mmio: per-port register base
 * @want_ncq: nonzero to configure the engine for NCQ operation
 *
 * Build the per-generation EDMA_CFG value, set or clear the NCQ
 * enable bit to match @want_ncq, mirror that choice into
 * pp->pp_flags, and write the result to the hardware.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1109
/**
 * mv_port_free_dma_mem - Release a port's DMA-coherent queue/SG memory
 * @ap: ATA channel whose buffers to free
 *
 * Return the CRQB/CRPB rings and per-tag SG tables to their dma_pools
 * and NULL the cached pointers, so a repeat call is harmless.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 * (On GEN_I tags 1..n alias tag 0, so only tag 0 is freed.)
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
1138
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 0 on success, -ENOMEM on any allocation failure (partially
 * allocated pool memory is released via mv_port_free_dma_mem;
 * pp itself is devm-managed).
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	/* take the host lock while touching EDMA registers */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1206
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.  The DMA engine must be stopped
 * before its queue/SG memory is returned to the pools.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}
1221
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  The ePRD byte-count
 * field is only 16 bits, so any DMA segment that crosses a 64KB
 * boundary is split into multiple ePRD entries.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* each NCQ tag owns its own SG table */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so this entry stops at the next 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* flag the final entry as end-of-table for the hardware */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1265
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001266static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001267{
Mark Lord559eeda2006-05-19 16:40:15 -04001268 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001269 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001270 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001271}
1272
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ commands use the EDMA request queue */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining shadow-register writes, in the order the CRQB requires */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1362
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* only (NCQ-)DMA protocols go through the EDMA request queue */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	/* Gen IIE carries the tag in two fields of the CRQB flags word */
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	/* split the per-tag SG table bus address into low/high words;
	 * ">> 16 >> 16" instead of ">> 32" avoids an undefined shift
	 * when dma_addr_t is only 32 bits wide
	 */
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* pack the taskfile registers into the four little-endian
	 * CRQB command words, in the layout the Gen IIE EDMA expects
	 */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
		(tf->command << 16) |
		(tf->feature << 24)
	);
	crqb->ata_cmd[1] = cpu_to_le32(
		(tf->lbal << 0) |
		(tf->lbam << 8) |
		(tf->lbah << 16) |
		(tf->device << 24)
	);
	crqb->ata_cmd[2] = cpu_to_le32(
		(tf->hob_lbal << 0) |
		(tf->hob_lbam << 8) |
		(tf->hob_lbah << 16) |
		(tf->hob_feature << 24)
	);
	crqb->ata_cmd[3] = cpu_to_le32(
		(tf->nsect << 0) |
		(tf->hob_nsect << 8)
	);

	/* nothing to map for commands without a data payload */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1431
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * Returns 0 on EDMA issue; otherwise whatever ata_qc_issue_prot()
 * returns for the non-EDMA path.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	/* (re)enable EDMA in the mode matching this command's protocol */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* advance the software producer index past the CRQB that
	 * mv_qc_prep*() filled in for this command
	 */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1473
Brett Russ05b308e2005-10-05 17:08:53 -04001474/**
Brett Russ05b308e2005-10-05 17:08:53 -04001475 * mv_err_intr - Handle error interrupts on the port
1476 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001477 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001478 *
1479 * In most cases, just clear the interrupt and move on. However,
1480 * some cases require an eDMA reset, which is done right before
1481 * the COMRESET in mv_phy_reset(). The SERR case requires a
1482 * clear of pending errors in the SATA SERROR register. Finally,
1483 * if the port disabled DMA, update our cached copy to match.
1484 *
1485 * LOCKING:
1486 * Inherited from caller.
1487 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001488static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001489{
Brett Russ31961942005-09-30 01:36:00 -04001490 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001491 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1492 struct mv_port_priv *pp = ap->private_data;
1493 struct mv_host_priv *hpriv = ap->host->private_data;
1494 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1495 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001496 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001497
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001498 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001499
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001500 if (!edma_enabled) {
1501 /* just a guess: do we need to do this? should we
1502 * expand this, and do it in all cases?
1503 */
Tejun Heo936fd732007-08-06 18:36:23 +09001504 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1505 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001506 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001507
1508 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1509
1510 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1511
1512 /*
1513 * all generations share these EDMA error cause bits
1514 */
1515
1516 if (edma_err_cause & EDMA_ERR_DEV)
1517 err_mask |= AC_ERR_DEV;
1518 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001519 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001520 EDMA_ERR_INTRL_PAR)) {
1521 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001522 action |= ATA_EH_RESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001523 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001524 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001525 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1526 ata_ehi_hotplugged(ehi);
1527 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001528 "dev disconnect" : "dev connect");
Tejun Heocf480622008-01-24 00:05:14 +09001529 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001530 }
1531
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001532 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001533 eh_freeze_mask = EDMA_EH_FREEZE_5;
1534
1535 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001536 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001537 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001538 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001539 }
1540 } else {
1541 eh_freeze_mask = EDMA_EH_FREEZE;
1542
1543 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001544 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001545 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001546 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001547 }
1548
1549 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001550 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1551 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001552 err_mask = AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001553 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001554 }
1555 }
Brett Russ20f733e2005-09-01 18:26:17 -04001556
1557 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001558 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001559
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001560 if (!err_mask) {
1561 err_mask = AC_ERR_OTHER;
Tejun Heocf480622008-01-24 00:05:14 +09001562 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001563 }
1564
1565 ehi->serror |= serr;
1566 ehi->action |= action;
1567
1568 if (qc)
1569 qc->err_mask |= err_mask;
1570 else
1571 ehi->err_mask |= err_mask;
1572
1573 if (edma_err_cause & eh_freeze_mask)
1574 ata_port_freeze(ap);
1575 else
1576 ata_port_abort(ap);
1577}
1578
1579static void mv_intr_pio(struct ata_port *ap)
1580{
1581 struct ata_queued_cmd *qc;
1582 u8 ata_status;
1583
1584 /* ignore spurious intr if drive still BUSY */
1585 ata_status = readb(ap->ioaddr.status_addr);
1586 if (unlikely(ata_status & ATA_BUSY))
1587 return;
1588
1589 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001590 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001591 if (unlikely(!qc)) /* no active tag */
1592 return;
1593 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1594 return;
1595
1596 /* and finally, complete the ATA command */
1597 qc->err_mask |= ac_err_mask(ata_status);
1598 ata_qc_complete(qc);
1599}
1600
/* Drain the EDMA response queue, completing each finished command.
 * Walks from the software consumer index up to the hardware producer
 * index, then writes the consumer pointer back once at the end.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* error path takes over; consumer pointer is not
			 * advanced past the failed entry
			 */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1666
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	/* each HC serves MV_PORTS_PER_HC consecutive global port numbers */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* non-PCI (SoC) hosts may expose fewer ports than a full HC */
	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* ack the bits we are about to service (write-1-to-clear-others) */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		/* each port owns two bits in the main IRQ register */
		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			/* polled commands are handled by libata, not here */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port);	/* range 0..3 */

		/* dispatch on EDMA vs PIO completion, per cached mode */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1749
/* Handle a PCI bus error interrupt: log it, clear the cause register,
 * then freeze every port with an online link so libata EH resets them.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* ack the PCI error before touching the ports */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* record the cause only on the first affected port */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1789
/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* PCI errors take precedence over per-HC handling */
	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each host controller's slice of the main cause bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1844
Jeff Garzikc9d39132005-11-13 17:47:51 -05001845static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1846{
1847 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1848 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1849
1850 return hc_mmio + ofs;
1851}
1852
1853static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1854{
1855 unsigned int ofs;
1856
1857 switch (sc_reg_in) {
1858 case SCR_STATUS:
1859 case SCR_ERROR:
1860 case SCR_CONTROL:
1861 ofs = sc_reg_in * sizeof(u32);
1862 break;
1863 default:
1864 ofs = 0xffffffffU;
1865 break;
1866 }
1867 return ofs;
1868}
1869
Tejun Heoda3dbb12007-07-16 14:29:40 +09001870static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001871{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001872 struct mv_host_priv *hpriv = ap->host->private_data;
1873 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001874 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001875 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1876
Tejun Heoda3dbb12007-07-16 14:29:40 +09001877 if (ofs != 0xffffffffU) {
1878 *val = readl(addr + ofs);
1879 return 0;
1880 } else
1881 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001882}
1883
Tejun Heoda3dbb12007-07-16 14:29:40 +09001884static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001885{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001886 struct mv_host_priv *hpriv = ap->host->private_data;
1887 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001888 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001889 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1890
Tejun Heoda3dbb12007-07-16 14:29:40 +09001891 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001892 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001893 return 0;
1894 } else
1895 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001896}
1897
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001898static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik522479f2005-11-12 22:14:02 -05001899{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001900 struct pci_dev *pdev = to_pci_dev(host->dev);
Jeff Garzik522479f2005-11-12 22:14:02 -05001901 int early_5080;
1902
Auke Kok44c10132007-06-08 15:46:36 -07001903 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001904
1905 if (!early_5080) {
1906 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1907 tmp |= (1 << 0);
1908 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1909 }
1910
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001911 mv_reset_pci_bus(host, mmio);
Jeff Garzik522479f2005-11-12 22:14:02 -05001912}
1913
/* Reinitialize the flash controller after a Gen I chip reset.
 * NOTE(review): 0x0fcfffff appears to be a vendor-prescribed init
 * value for MV_FLASH_CTL -- not documented here; confirm against the
 * Marvell datasheet/vendor driver before changing.
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1918
Jeff Garzik47c2b672005-11-12 21:13:17 -05001919static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001920 void __iomem *mmio)
1921{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001922 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1923 u32 tmp;
1924
1925 tmp = readl(phy_mmio + MV5_PHY_MODE);
1926
1927 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1928 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001929}
1930
/* Enable LED operation on Gen I chips: clear the GPIO port control,
 * then update the expansion-ROM BAR control register.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit except bit 0, which
	 * looks like it was meant to be "&= ~(1 << 0)" (clear bit 0) --
	 * verify against the datasheet before changing, as this is what
	 * has always shipped.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1943
/* Apply Gen I PHY errata fixups for @port, then program the cached
 * pre-emphasis/amplitude values saved by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* pre-emphasis (12:11) and amplitude (7:5) field mask */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 errata: adjust LT mode and PHY control */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* replace the signal fields with the values cached at init */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1969
Jeff Garzikc9d39132005-11-13 17:47:51 -05001970
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset a single Gen I port: disable EDMA, reset the channel, then
 * zero/init the per-port EDMA registers to a known state.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* EDMA must be disabled before the channel reset */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
1997
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one Gen I host controller block: clear its IRQ/config
 * registers and rewrite the mode register at offset 0x20.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* read-modify-write the HC mode register */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
2016
2017static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2018 unsigned int n_hc)
2019{
2020 unsigned int hc, port;
2021
2022 for (hc = 0; hc < n_hc; hc++) {
2023 for (port = 0; port < MV_PORTS_PER_HC; port++)
2024 mv5_reset_hc_port(hpriv, mmio,
2025 (hc * MV_PORTS_PER_HC) + port);
2026
2027 mv5_reset_one_hc(hpriv, mmio, hc);
2028 }
2029
2030 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002031}
2032
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Common PCI-side reset: restore PCI mode bits and zero the PCI
 * error/interrupt bookkeeping registers.
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	/* clear the upper byte of the PCI mode register */
	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	/* cause/mask offsets differ per chip family; use cached offsets */
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
2057
2058static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2059{
2060 u32 tmp;
2061
2062 mv5_reset_flash(hpriv, mmio);
2063
2064 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2065 tmp &= 0x3;
2066 tmp |= (1 << 5) | (1 << 6);
2067 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2068}
2069
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @hpriv: host private data (unused here)
 * @mmio: base address of the HBA
 * @n_hc: number of host controllers (unused here)
 *
 * This routine only applies to 6xxx parts.  Returns 0 on success,
 * 1 if any step of the reset handshake times out.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	/* step 1: stop the PCI master and wait (up to ~1ms) for it to
	 * drain its posted transactions
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2133
/*
 * mv6_read_preamp - capture per-port signal amplitude / pre-emphasis.
 * @hpriv: host private data; results stored in hpriv->signal[idx]
 * @idx: port index
 * @mmio: base address of the HBA
 *
 * If bit 0 of MV_RESET_CFG is clear, use fixed default values instead
 * of reading them back from PHY_MODE2.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		/* defaults: amplitude 0x7, pre-emphasis 0x1 */
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
2153
/* Enable the LEDs on 60xx parts via the GPIO port control register
 * (magic value 0x60 — sets bits 5 and 6; see also mv6_reset_flash). */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2158
/*
 * mv6_phy_errata - apply 60xx PHY errata workarounds to one port.
 * @hpriv: host private data (hp_flags select which errata apply)
 * @mmio: base address of the HBA
 * @port: port number to fix up
 *
 * The exact sequence and delays below come from the errata workarounds;
 * do not reorder.  Also restores the saved amplitude/pre-emphasis values
 * captured earlier by the read_preamp hook.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then clear both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2 errata: save register 0x310 across the update */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2224
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002225/* TODO: use the generic LED interface to configure the SATA Presence */
2226/* & Acitivy LEDs on the board */
2227static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2228 void __iomem *mmio)
2229{
2230 return;
2231}
2232
2233static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2234 void __iomem *mmio)
2235{
2236 void __iomem *port_mmio;
2237 u32 tmp;
2238
2239 port_mmio = mv_port_base(mmio, idx);
2240 tmp = readl(port_mmio + PHY_MODE2);
2241
2242 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2243 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2244}
2245
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * mv_soc_reset_hc_port - reset one SoC SATA port.
 * @hpriv: host private data
 * @mmio: base address of the HBA
 * @port: port number to reset
 *
 * Disables EDMA, resets the channel, then zeroes the per-port EDMA
 * queue/IRQ registers and restores default CFG and IORDY timeout.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
2271
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * mv_soc_reset_one_hc - zero the SoC host controller's IRQ registers.
 * @hpriv: host private data (unused here)
 * @mmio: base address of the HBA
 *
 * SoC variants have a single host controller at hc index 0; registers
 * 0x0c/0x10/0x14 are cleared (offsets relative to the HC base).
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}
2285
2286#undef ZERO
2287
2288static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2289 void __iomem *mmio, unsigned int n_hc)
2290{
2291 unsigned int port;
2292
2293 for (port = 0; port < hpriv->n_ports; port++)
2294 mv_soc_reset_hc_port(hpriv, mmio, port);
2295
2296 mv_soc_reset_one_hc(hpriv, mmio);
2297
2298 return 0;
2299}
2300
2301static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2302 void __iomem *mmio)
2303{
2304 return;
2305}
2306
2307static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2308{
2309 return;
2310}
2311
/*
 * mv_channel_reset - assert and release ATA_RST on one channel.
 * @hpriv: host private data
 * @mmio: base address of the HBA
 * @port_no: port to reset
 *
 * On Gen-II parts, also forces the interface control register to the
 * chip-spec value with gen2i speed enabled before releasing reset.
 * Finishes by running the chip-specific phy_errata hook.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* Gen-I needs extra settle time after reset */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2338
/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * @class: resulting device class (ATA_DEV_NONE if link offline)
 * @deadline: jiffies value after which to stop polling
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller.  This is coded to safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll SStatus until the link is either up (DET==3) or gone (DET==0),
	 * or the deadline expires */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* clear any leftover EDMA error interrupt state */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* EDMA must not be active while we issue a reset */
	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2442
Tejun Heocc0680a2007-08-06 18:36:23 +09002443static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002444{
Tejun Heocc0680a2007-08-06 18:36:23 +09002445 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002446 struct mv_port_priv *pp = ap->private_data;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002447
Tejun Heocf480622008-01-24 00:05:14 +09002448 mv_stop_dma(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002449
Tejun Heocf480622008-01-24 00:05:14 +09002450 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002451 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002452
Tejun Heocf480622008-01-24 00:05:14 +09002453 return 0;
Jeff Garzik22374672005-11-17 10:59:48 -05002454}
2455
/*
 * mv_hardreset - libata EH hardreset hook.
 * @link: ATA link to reset
 * @class: out: device class detected by mv_phy_reset
 * @deadline: jiffies deadline passed through to mv_phy_reset
 *
 * Stops EDMA, performs a channel (eDMA/ATA_RST) reset, then a PHY
 * COMRESET.  Always returns 0.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
2471
/*
 * mv_postreset - libata EH postreset hook.
 * @link: ATA link that was just reset
 * @classes: device classes detected by the reset
 *
 * Prints link status, clears SError, and (if a device is present)
 * writes the cached device control value to the control register.
 */
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
2493
/* Standard libata error handler using this driver's reset hooks. */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2499
/*
 * mv_eh_freeze - libata EH freeze hook.
 * @ap: port to freeze
 *
 * Clears this port's err/done bits in the main IRQ mask register so
 * the port stops generating interrupts.  Each port owns two adjacent
 * bits; ports on the second host controller (port_no > 3) are shifted
 * by one extra bit.
 */
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}
2519
/*
 * mv_eh_thaw - libata EH thaw hook.
 * @ap: port to thaw
 *
 * Clears any pending EDMA error and HC interrupt causes for this port,
 * then re-enables its err/done bits in the main IRQ mask (inverse of
 * mv_eh_freeze).  hc_port_no is the port index relative to its host
 * controller (0-3).
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2553
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: taskfile shadow registers live at
	 * shd_base + 4 * ATA_REG_xxx
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions
	 * (SError is write-1-to-clear, hence the read-back write) */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2602
/*
 * mv_chip_id - select per-chip ops and errata flags from the board index.
 * @host: ATA host being set up
 * @board_idx: controller index from the PCI/platform match table
 *
 * Fills in hpriv->ops, hpriv->hp_flags, and the IRQ cause/mask offsets
 * (PCI vs PCIe).  Returns 0 on success, 1 for an unknown board index.
 *
 * NOTE: pdev is only dereferenced on the PCI-based chip paths; the
 * chip_soc path does not touch it.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: 7042 shares the 6042 (Gen-IIE) setup */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* select PCI vs PCIe interrupt register offsets */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2742
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * Returns 0 on success, nonzero from mv_chip_id or the chip's
 * reset_hc hook on failure.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	/* the main IRQ cause/mask registers live at different offsets
	 * on PCI-based chips vs SoC integrations */
	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* capture signal amplitude/pre-emphasis before resetting */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);	/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
2859
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002860static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2861{
2862 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2863 MV_CRQB_Q_SZ, 0);
2864 if (!hpriv->crqb_pool)
2865 return -ENOMEM;
2866
2867 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2868 MV_CRPB_Q_SZ, 0);
2869 if (!hpriv->crpb_pool)
2870 return -ENOMEM;
2871
2872 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2873 MV_SG_TBL_SZ, 0);
2874 if (!hpriv->sg_tbl_pool)
2875 return -ENOMEM;
2876
2877 return 0;
2878}
2879
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002880/**
2881 * mv_platform_probe - handle a positive probe of an soc Marvell
2882 * host
2883 * @pdev: platform device found
2884 *
2885 * LOCKING:
2886 * Inherited from caller.
2887 */
2888static int mv_platform_probe(struct platform_device *pdev)
2889{
2890 static int printed_version;
2891 const struct mv_sata_platform_data *mv_platform_data;
2892 const struct ata_port_info *ppi[] =
2893 { &mv_port_info[chip_soc], NULL };
2894 struct ata_host *host;
2895 struct mv_host_priv *hpriv;
2896 struct resource *res;
2897 int n_ports, rc;
2898
2899 if (!printed_version++)
2900 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2901
2902 /*
2903 * Simple resource validation ..
2904 */
2905 if (unlikely(pdev->num_resources != 2)) {
2906 dev_err(&pdev->dev, "invalid number of resources\n");
2907 return -EINVAL;
2908 }
2909
2910 /*
2911 * Get the register base first
2912 */
2913 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2914 if (res == NULL)
2915 return -EINVAL;
2916
2917 /* allocate host */
2918 mv_platform_data = pdev->dev.platform_data;
2919 n_ports = mv_platform_data->n_ports;
2920
2921 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2922 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2923
2924 if (!host || !hpriv)
2925 return -ENOMEM;
2926 host->private_data = hpriv;
2927 hpriv->n_ports = n_ports;
2928
2929 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002930 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2931 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002932 hpriv->base -= MV_SATAHC0_REG_BASE;
2933
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002934 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2935 if (rc)
2936 return rc;
2937
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002938 /* initialize adapter */
2939 rc = mv_init_host(host, chip_soc);
2940 if (rc)
2941 return rc;
2942
2943 dev_printk(KERN_INFO, &pdev->dev,
2944 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2945 host->n_ports);
2946
2947 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2948 IRQF_SHARED, &mv6_sht);
2949}
2950
2951/*
2952 *
2953 * mv_platform_remove - unplug a platform interface
2954 * @pdev: platform device
2955 *
2956 * A platform bus SATA device has been unplugged. Perform the needed
2957 * cleanup. Also called on module unload for any active devices.
2958 */
2959static int __devexit mv_platform_remove(struct platform_device *pdev)
2960{
2961 struct device *dev = &pdev->dev;
2962 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002963
2964 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002965 return 0;
2966}
2967
/* Platform-bus binding for SoC-integrated (non-PCI) Marvell SATA cores. */
static struct platform_driver mv_platform_driver = {
	.probe = mv_platform_probe,
	.remove = __devexit_p(mv_platform_remove),
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};
2976
2977
#ifdef CONFIG_PCI
/* Forward declaration; the probe body follows the PCI-only helpers below. */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


/* PCI-bus binding for the chip variants behind a PCI/PCI-X interface. */
static struct pci_driver mv_pci_driver = {
	.name = DRV_NAME,
	.id_table = mv_pci_tbl,
	.probe = mv_pci_init_one,
	.remove = ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2994
2995
2996/* move to PCI layer or libata core? */
2997static int pci_go_64(struct pci_dev *pdev)
2998{
2999 int rc;
3000
3001 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3002 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3003 if (rc) {
3004 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3005 if (rc) {
3006 dev_printk(KERN_ERR, &pdev->dev,
3007 "64-bit DMA enable failed\n");
3008 return rc;
3009 }
3010 }
3011 } else {
3012 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3013 if (rc) {
3014 dev_printk(KERN_ERR, &pdev->dev,
3015 "32-bit DMA enable failed\n");
3016 return rc;
3017 }
3018 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3019 if (rc) {
3020 dev_printk(KERN_ERR, &pdev->dev,
3021 "32-bit consistent DMA enable failed\n");
3022 return rc;
3023 }
3024 }
3025
3026 return rc;
3027}
3028
Brett Russ05b308e2005-10-05 17:08:53 -04003029/**
3030 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09003031 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04003032 *
3033 * FIXME: complete this.
3034 *
3035 * LOCKING:
3036 * Inherited from caller.
3037 */
Tejun Heo4447d352007-04-17 23:44:08 +09003038static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04003039{
Tejun Heo4447d352007-04-17 23:44:08 +09003040 struct pci_dev *pdev = to_pci_dev(host->dev);
3041 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07003042 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003043 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04003044
3045 /* Use this to determine the HW stepping of the chip so we know
3046 * what errata to workaround
3047 */
Brett Russ31961942005-09-30 01:36:00 -04003048 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3049 if (scc == 0)
3050 scc_s = "SCSI";
3051 else if (scc == 0x01)
3052 scc_s = "RAID";
3053 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003054 scc_s = "?";
3055
3056 if (IS_GEN_I(hpriv))
3057 gen = "I";
3058 else if (IS_GEN_II(hpriv))
3059 gen = "II";
3060 else if (IS_GEN_IIE(hpriv))
3061 gen = "IIE";
3062 else
3063 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04003064
Jeff Garzika9524a72005-10-30 14:39:11 -05003065 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003066 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3067 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04003068 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3069}
3070
/**
 *	mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	Brings a PCI-attached controller up: enables the device, maps the
 *	primary BAR, configures DMA masks, creates the DMA pools, runs
 *	chip init and finally activates the host.  All resources are
 *	devres-managed, so error paths may simply return.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	/* Board index selects the per-chip entry in mv_port_info[]. */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* NOTE(review): pinning appears intended to keep the device
		 * enabled while another owner holds the BAR — confirm. */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	/* If MSI was requested but cannot be enabled, force legacy INTx. */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003140#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003141
/* Prototypes for the platform callbacks (defined earlier in this file). */
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
3144
Brett Russ20f733e2005-09-01 18:26:17 -04003145static int __init mv_init(void)
3146{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003147 int rc = -ENODEV;
3148#ifdef CONFIG_PCI
3149 rc = pci_register_driver(&mv_pci_driver);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003150 if (rc < 0)
3151 return rc;
3152#endif
3153 rc = platform_driver_register(&mv_platform_driver);
3154
3155#ifdef CONFIG_PCI
3156 if (rc < 0)
3157 pci_unregister_driver(&mv_pci_driver);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003158#endif
3159 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003160}
3161
/* Module unload: tear down both bus bindings registered by mv_init(). */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3169
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:sata_mv");

#ifdef CONFIG_PCI
/* The 'msi' option only applies to the PCI-attached chip variants. */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);