blob: 8a77a0ae83ad6619d421faf71e03532dd8d8b4ec [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
Jeff Garzik4a05e202007-05-24 23:40:15 -040032 4) Add NCQ support (easy to intermediate, once new-EH support appears)
33
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35
36 6) Add port multiplier support (intermediate)
37
38 7) Test and verify 3.0 Gbps support
39
40 8) Develop a low-power-consumption strategy, and implement it.
41
42 9) [Experiment, low priority] See if ATAPI can be supported using
43 "unknown FIS" or "vendor-specific FIS" support, or something creative
44 like that.
45
46 10) [Experiment, low priority] Investigate interrupt coalescing.
47 Quite often, especially with PCI Message Signalled Interrupts (MSI),
48 the overhead reduced by interrupt mitigation is quite often not
49 worth the latency cost.
50
51 11) [Experiment, Marvell value added] Is it possible to use target
52 mode to cross-connect two Linux boxes with Marvell cards? If so,
53 creating LibATA target mode support would be very interesting.
54
55 Target mode, for those without docs, is the ability to directly
56 connect two SATA controllers.
57
58 13) Verify that 7042 is fully supported. I only have a 6042.
59
60*/
61
62
Brett Russ20f733e2005-09-01 18:26:17 -040063#include <linux/kernel.h>
64#include <linux/module.h>
65#include <linux/pci.h>
66#include <linux/init.h>
67#include <linux/blkdev.h>
68#include <linux/delay.h>
69#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040070#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050071#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040072#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050073#include <scsi/scsi_cmnd.h>
Brett Russ20f733e2005-09-01 18:26:17 -040074#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040075
76#define DRV_NAME "sata_mv"
Jeff Garzik8bc3fc42007-05-21 20:26:38 -040077#define DRV_VERSION "0.81"
Brett Russ20f733e2005-09-01 18:26:17 -040078
/*
 * Chip register offsets and bit definitions.  These are hardware
 * constants taken from the Marvell 50xx/60xx/70xx register maps;
 * do not change values without consulting the datasheet/errata.
 */
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	/* error conditions that freeze the port (Gen II/IIE) */
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRBQ_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	/* error conditions that freeze the port (Gen I / 50xx) */
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRBQ_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),
	MV_HP_GEN_II		= (1 << 7),
	MV_HP_GEN_IIE		= (1 << 8),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),
};
313
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400314#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
315#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500316#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500317
/* DMA boundary and queue-base address masks (low 32 bits). */
enum {
	MV_DMA_BOUNDARY		= 0xffffffffU,	/* SG entries may not cross this */

	/* CRQB base must be 1KB aligned; low bits are reused as pointers */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* CRPB base must be 256B aligned */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
325
/* Board index into mv_port_info[] / mv_pci_tbl driver_data. */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
335
/* Command ReQuest Block: 32B; little-endian as consumed by the EDMA engine */
struct mv_crqb {
	__le32			sg_addr;	/* ePRD table base, low 32 bits */
	__le32			sg_addr_hi;	/* ePRD table base, high 32 bits */
	__le16			ctrl_flags;
	__le16			ata_cmd[11];	/* encoded ATA register writes */
};
343
/* Gen-IIE (6042/7042) variant of the Command ReQuest Block */
struct mv_crqb_iie {
	__le32			addr;		/* ePRD table base, low 32 bits */
	__le32			addr_hi;	/* ePRD table base, high 32 bits */
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];	/* packed ATA taskfile */
};
351
/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;		/* completed command tag */
	__le16			flags;		/* includes device status byte */
	__le32			tmstmp;
};
358
359/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
360struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400361 __le32 addr;
362 __le32 flags_size;
363 __le32 addr_hi;
364 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400365};
366
/* Per-port private data: EDMA queues (DMA-coherent) and state flags */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* request queue, CPU view */
	dma_addr_t		crqb_dma;	/* request queue, bus address */
	struct mv_crpb		*crpb;		/* response queue, CPU view */
	dma_addr_t		crpb_dma;	/* response queue, bus address */
	struct mv_sg		*sg_tbl;	/* ePRD scatter/gather table */
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;	/* next request queue slot */
	unsigned int		resp_idx;	/* next response queue slot */

	u32			pp_flags;	/* MV_PP_FLAG_* */
};
380
/* Per-port PHY signal parameters captured at probe (see read_preamp) */
struct mv_port_signal {
	u32			amps;	/* amplitude */
	u32			pre;	/* pre-emphasis */
};
385
struct mv_host_priv;

/* Per-chip-family hardware hooks; mv5xxx_ops / mv6xxx_ops instantiate these */
struct mv_hw_ops {
	/* apply PHY errata workarounds for one port */
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	/* capture PHY preamp/signal settings for port idx */
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	/* reset n_hc host controllers; returns 0 on success */
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
398
/* Per-host private data shared by all ports of one adapter */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_FLAG_* / errata / gen bits */
	struct mv_port_signal	signal[8];	/* one entry per possible port */
	const struct mv_hw_ops	*ops;		/* chip-family hooks */
};
404
405static void mv_irq_clear(struct ata_port *ap);
406static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
407static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500408static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
409static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400410static int mv_port_start(struct ata_port *ap);
411static void mv_port_stop(struct ata_port *ap);
412static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500413static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900414static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400415static void mv_error_handler(struct ata_port *ap);
416static void mv_post_int_cmd(struct ata_queued_cmd *qc);
417static void mv_eh_freeze(struct ata_port *ap);
418static void mv_eh_thaw(struct ata_port *ap);
Brett Russ20f733e2005-09-01 18:26:17 -0400419static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
420
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500421static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
422 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500423static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
424static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
425 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500426static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
427 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500428static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
429static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500430
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500431static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
432 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500433static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
434static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
435 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500436static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
437 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500438static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
439static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500440static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
441 unsigned int port_no);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500442
/* SCSI host template for Gen-I (50xx) chips */
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
460
/* SCSI host template for Gen-II/IIE (60xx/70xx) chips */
static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
478
/* libata callbacks for Gen-I (50xx): uses mv5_scr_read/write register access */
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
509
/* libata callbacks for Gen-II (60xx): standard SCR access, mv_qc_prep */
static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
540
/* libata callbacks for Gen-IIE (6042/7042): differs only in qc_prep variant */
static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
571
/* Per-board attributes, indexed by enum chip_type */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
617
/* PCI IDs handled by this driver; driver_data is the chip_type index */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};
642
/* PCI driver registration glue */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
649
/* Hardware hooks for Gen-I (50xx) family */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
658
/* Hardware hooks for Gen-II/IIE (60xx/70xx) family */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
667
Brett Russ20f733e2005-09-01 18:26:17 -0400668/*
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500669 * module options
670 */
671static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
672
673
Jeff Garzikd88184f2007-02-26 01:26:06 -0500674/* move to PCI layer or libata core? */
675static int pci_go_64(struct pci_dev *pdev)
676{
677 int rc;
678
679 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
680 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
681 if (rc) {
682 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
683 if (rc) {
684 dev_printk(KERN_ERR, &pdev->dev,
685 "64-bit DMA enable failed\n");
686 return rc;
687 }
688 }
689 } else {
690 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
691 if (rc) {
692 dev_printk(KERN_ERR, &pdev->dev,
693 "32-bit DMA enable failed\n");
694 return rc;
695 }
696 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
697 if (rc) {
698 dev_printk(KERN_ERR, &pdev->dev,
699 "32-bit consistent DMA enable failed\n");
700 return rc;
701 }
702 }
703
704 return rc;
705}
706
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500707/*
Brett Russ20f733e2005-09-01 18:26:17 -0400708 * Functions
709 */
710
/*
 * Write a register and then read it back, forcing any posted PCI
 * write to reach the device before we proceed ("fl" = flush).
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
716
Brett Russ20f733e2005-09-01 18:26:17 -0400717static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
718{
719 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
720}
721
Jeff Garzikc9d39132005-11-13 17:47:51 -0500722static inline unsigned int mv_hc_from_port(unsigned int port)
723{
724 return port >> MV_PORT_HC_SHIFT;
725}
726
727static inline unsigned int mv_hardport_from_port(unsigned int port)
728{
729 return port & MV_PORT_MASK;
730}
731
732static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
733 unsigned int port)
734{
735 return mv_hc_base(base, mv_hc_from_port(port));
736}
737
Brett Russ20f733e2005-09-01 18:26:17 -0400738static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
739{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500740 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500741 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500742 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400743}
744
745static inline void __iomem *mv_ap_base(struct ata_port *ap)
746{
Tejun Heo0d5ff562007-02-01 15:06:36 +0900747 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400748}
749
Jeff Garzikcca39742006-08-24 03:19:22 -0400750static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400751{
Jeff Garzikcca39742006-08-24 03:19:22 -0400752 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400753}
754
/* libata ->irq_clear hook: intentionally empty.  Per-port interrupt
 * causes are cleared in the interrupt and error-handling paths instead.
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
758
/*
 * mv_set_edma_ptrs - program the EDMA request/response queue base and
 * in/out pointer registers from the software-cached indices in @pp.
 * Must be called with EDMA quiescent.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* request queue base must be 1KB-aligned (low 10 bits clear) */
	WARN_ON(pp->crqb_dma & 0x3ff);
	/* ">> 16 >> 16" extracts the high dword without a 64-bit shift
	 * that would misbehave when dma_addr_t is 32 bits wide */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* NOTE(review): on XX42A0-errata parts the OUT pointer write also
	 * carries the queue base bits - presumably a chip-errata
	 * workaround; confirm against the Marvell errata documents. */
	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response queue base must be 256B-aligned (low 8 bits clear) */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
798
/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @hpriv: host private data (needed to program the queue pointers)
 * @pp: port private data
 *
 * Enables EDMA if our cached state says it is off: clears stale
 * error-cause bits, re-programs the queue pointers, then sets EDMA_EN.
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		/* point the hardware at the request/response rings
		 * before turning the engine on */
		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	/* cached state must agree with the hardware */
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
824
/**
 * mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Requests EDMA disable (if our cached state says it is on) and then
 * polls until the engine reports it has actually stopped.
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * Returns 0 on success, -EIO if the engine will not stop.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active. The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* cached state says EDMA is already off; verify */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);	/* up to ~100ms total before giving up */
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
867
#ifdef ATA_DEBUG
/* Debug helper: dump @bytes of MMIO space, four 32-bit words per line */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b = 0;

	while (b < bytes) {
		int w;

		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
882
/* Debug helper: dump @bytes of PCI config space, four dwords per line
 * (compiled out unless ATA_DEBUG is defined).
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b = 0;

	while (b < bytes) {
		int w;
		u32 dw;

		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
899static void mv_dump_all_regs(void __iomem *mmio_base, int port,
900 struct pci_dev *pdev)
901{
902#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -0500903 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -0400904 port >> MV_PORT_HC_SHIFT);
905 void __iomem *port_base;
906 int start_port, num_ports, p, start_hc, num_hcs, hc;
907
908 if (0 > port) {
909 start_hc = start_port = 0;
910 num_ports = 8; /* shld be benign for 4 port devs */
911 num_hcs = 2;
912 } else {
913 start_hc = port >> MV_PORT_HC_SHIFT;
914 start_port = port;
915 num_ports = num_hcs = 1;
916 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500917 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -0400918 num_ports > 1 ? num_ports - 1 : start_port);
919
920 if (NULL != pdev) {
921 DPRINTK("PCI config space regs:\n");
922 mv_dump_pci_cfg(pdev, 0x68);
923 }
924 DPRINTK("PCI regs:\n");
925 mv_dump_mem(mmio_base+0xc00, 0x3c);
926 mv_dump_mem(mmio_base+0xd00, 0x34);
927 mv_dump_mem(mmio_base+0xf00, 0x4);
928 mv_dump_mem(mmio_base+0x1d00, 0x6c);
929 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -0700930 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -0400931 DPRINTK("HC regs (HC %i):\n", hc);
932 mv_dump_mem(hc_base, 0x1c);
933 }
934 for (p = start_port; p < start_port + num_ports; p++) {
935 port_base = mv_port_base(mmio_base, p);
936 DPRINTK("EDMA regs (port %i):\n",p);
937 mv_dump_mem(port_base, 0x54);
938 DPRINTK("SATA regs (port %i):\n",p);
939 mv_dump_mem(port_base+0x300, 0x60);
940 }
941#endif
942}
943
Brett Russ20f733e2005-09-01 18:26:17 -0400944static unsigned int mv_scr_offset(unsigned int sc_reg_in)
945{
946 unsigned int ofs;
947
948 switch (sc_reg_in) {
949 case SCR_STATUS:
950 case SCR_CONTROL:
951 case SCR_ERROR:
952 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
953 break;
954 case SCR_ACTIVE:
955 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
956 break;
957 default:
958 ofs = 0xffffffffU;
959 break;
960 }
961 return ofs;
962}
963
964static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
965{
966 unsigned int ofs = mv_scr_offset(sc_reg_in);
967
Jeff Garzik35177262007-02-24 21:26:42 -0500968 if (0xffffffffU != ofs)
Brett Russ20f733e2005-09-01 18:26:17 -0400969 return readl(mv_ap_base(ap) + ofs);
Jeff Garzik35177262007-02-24 21:26:42 -0500970 else
Brett Russ20f733e2005-09-01 18:26:17 -0400971 return (u32) ofs;
Brett Russ20f733e2005-09-01 18:26:17 -0400972}
973
974static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
975{
976 unsigned int ofs = mv_scr_offset(sc_reg_in);
977
Jeff Garzik35177262007-02-24 21:26:42 -0500978 if (0xffffffffU != ofs)
Brett Russ20f733e2005-09-01 18:26:17 -0400979 writelfl(val, mv_ap_base(ap) + ofs);
Brett Russ20f733e2005-09-01 18:26:17 -0400980}
981
/* Program EDMA_CFG appropriately for the chip generation.  All paths
 * leave NCQ disabled (NCQ support is still on the TODO list above).
 */
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;	/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;	/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		/* NOTE(review): these magic bits match the vendor driver;
		 * verify against the 6042/7042 datasheet if changing them. */
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1013
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from ata_pad_alloc().
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	/* devm_/dmam_ allocations are released automatically on driver
	 * detach, so the error paths below can simply return. */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	/* configure EDMA for this chip generation and point the hardware
	 * at the freshly allocated rings */
	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	/* Don't turn on EDMA here...do it before DMA commands only. Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
1081
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.  (The DMA memory itself is devm/dmam
 * managed and freed automatically on driver detach.)
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	/* serialize against the interrupt handler while stopping EDMA */
	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);
}
1099
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * Returns the number of ePRD entries written.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		/* split the DMA address across two 32-bit fields;
		 * ">> 16 >> 16" avoids an undefined 32-bit shift when
		 * dma_addr_t is only 32 bits wide */
		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);

		/* hardware stops walking the table at this flag */
		if (ata_sg_is_last(sg, qc))
			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

		mv_sg++;
		n_sg++;
	}

	return n_sg;
}
1134
Mark Lorde1469872006-05-22 19:02:03 -04001135static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001136{
Mark Lord559eeda2006-05-19 16:40:15 -04001137 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001138 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001139 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001140}
1141
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA. Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* non-DMA protocols are issued via the generic libata path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command. So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ. NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ	/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif			/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux. If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* hob (high-order byte) values must precede their low-order
	 * counterparts; the command register is written last. */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1233
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA. Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* non-DMA protocols are issued via the generic libata path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* Gen IIE CRQBs overlay the same ring slots but pack the whole
	 * taskfile into four 32-bit words instead of per-register
	 * command words. */
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
		(tf->command << 16) |
		(tf->feature << 24)
	);
	crqb->ata_cmd[1] = cpu_to_le32(
		(tf->lbal << 0) |
		(tf->lbam << 8) |
		(tf->lbah << 16) |
		(tf->device << 24)
	);
	crqb->ata_cmd[2] = cpu_to_le32(
		(tf->hob_lbal << 0) |
		(tf->hob_lbam << 8) |
		(tf->hob_lbah << 16) |
		(tf->hob_feature << 24)
	);
	crqb->ata_cmd[3] = cpu_to_le32(
		(tf->nsect << 0) |
		(tf->hob_nsect << 8)
	);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1302
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA. Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port. Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	/* (re-)enable EDMA; no-op if it is already running */
	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* advance the software producer index (ring wrap is handled by
	 * masking below) */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1350
Brett Russ05b308e2005-10-05 17:08:53 -04001351/**
Brett Russ05b308e2005-10-05 17:08:53 -04001352 * mv_err_intr - Handle error interrupts on the port
1353 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001354 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001355 *
1356 * In most cases, just clear the interrupt and move on. However,
1357 * some cases require an eDMA reset, which is done right before
1358 * the COMRESET in mv_phy_reset(). The SERR case requires a
1359 * clear of pending errors in the SATA SERROR register. Finally,
1360 * if the port disabled DMA, update our cached copy to match.
1361 *
1362 * LOCKING:
1363 * Inherited from caller.
1364 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001365static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001366{
Brett Russ31961942005-09-30 01:36:00 -04001367 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001368 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1369 struct mv_port_priv *pp = ap->private_data;
1370 struct mv_host_priv *hpriv = ap->host->private_data;
1371 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1372 unsigned int action = 0, err_mask = 0;
1373 struct ata_eh_info *ehi = &ap->eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001374
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001375 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001376
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001377 if (!edma_enabled) {
1378 /* just a guess: do we need to do this? should we
1379 * expand this, and do it in all cases?
1380 */
Tejun Heo81952c52006-05-15 20:57:47 +09001381 sata_scr_read(ap, SCR_ERROR, &serr);
1382 sata_scr_write_flush(ap, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001383 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001384
1385 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1386
1387 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1388
1389 /*
1390 * all generations share these EDMA error cause bits
1391 */
1392
1393 if (edma_err_cause & EDMA_ERR_DEV)
1394 err_mask |= AC_ERR_DEV;
1395 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1396 EDMA_ERR_CRBQ_PAR | EDMA_ERR_CRPB_PAR |
1397 EDMA_ERR_INTRL_PAR)) {
1398 err_mask |= AC_ERR_ATA_BUS;
1399 action |= ATA_EH_HARDRESET;
1400 ata_ehi_push_desc(ehi, ", parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001401 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001402 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1403 ata_ehi_hotplugged(ehi);
1404 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1405 ", dev disconnect" : ", dev connect");
1406 }
1407
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001408 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001409 eh_freeze_mask = EDMA_EH_FREEZE_5;
1410
1411 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1412 struct mv_port_priv *pp = ap->private_data;
1413 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1414 ata_ehi_push_desc(ehi, ", EDMA self-disable");
1415 }
1416 } else {
1417 eh_freeze_mask = EDMA_EH_FREEZE;
1418
1419 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1420 struct mv_port_priv *pp = ap->private_data;
1421 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1422 ata_ehi_push_desc(ehi, ", EDMA self-disable");
1423 }
1424
1425 if (edma_err_cause & EDMA_ERR_SERR) {
1426 sata_scr_read(ap, SCR_ERROR, &serr);
1427 sata_scr_write_flush(ap, SCR_ERROR, serr);
1428 err_mask = AC_ERR_ATA_BUS;
1429 action |= ATA_EH_HARDRESET;
1430 }
1431 }
Brett Russ20f733e2005-09-01 18:26:17 -04001432
1433 /* Clear EDMA now that SERR cleanup done */
1434 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1435
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001436 if (!err_mask) {
1437 err_mask = AC_ERR_OTHER;
1438 action |= ATA_EH_HARDRESET;
1439 }
1440
1441 ehi->serror |= serr;
1442 ehi->action |= action;
1443
1444 if (qc)
1445 qc->err_mask |= err_mask;
1446 else
1447 ehi->err_mask |= err_mask;
1448
1449 if (edma_err_cause & eh_freeze_mask)
1450 ata_port_freeze(ap);
1451 else
1452 ata_port_abort(ap);
1453}
1454
1455static void mv_intr_pio(struct ata_port *ap)
1456{
1457 struct ata_queued_cmd *qc;
1458 u8 ata_status;
1459
1460 /* ignore spurious intr if drive still BUSY */
1461 ata_status = readb(ap->ioaddr.status_addr);
1462 if (unlikely(ata_status & ATA_BUSY))
1463 return;
1464
1465 /* get active ATA command */
1466 qc = ata_qc_from_tag(ap, ap->active_tag);
1467 if (unlikely(!qc)) /* no active tag */
1468 return;
1469 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1470 return;
1471
1472 /* and finally, complete the ATA command */
1473 qc->err_mask |= ac_err_mask(ata_status);
1474 ata_qc_complete(qc);
1475}
1476
/* mv_intr_edma - drain the hardware CRPB response queue for one port.
 *
 * Walks the response queue from the software out-pointer up to the
 * hardware in-pointer, completing each finished command.  An entry
 * with EDMA error bits set diverts the whole port to mv_err_intr().
 * The hardware out-pointer is only written back once, after the loop.
 *
 * LOCKING:
 *	Inherited from caller (host lock held, interrupt context).
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;


		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			qc = ata_qc_from_tag(ap, ap->active_tag);

		/* 60xx: get active ATA command via tag, to enable support
		 * for queueing.  this works transparently for queued and
		 * non-queued modes.
		 */
		else {
			unsigned int tag;

			/* the CRPB "id" field holds the tag; the shift
			 * differs between Gen II and Gen IIE parts */
			if (IS_GEN_II(hpriv))
				tag = (le16_to_cpu(pp->crpb[out_index].id)
					>> CRPB_IOID_SHIFT_6) & 0x3f;
			else
				tag = (le16_to_cpu(pp->crpb[out_index].id)
					>> CRPB_IOID_SHIFT_7) & 0x3f;

			qc = ata_qc_from_tag(ap, tag);
		}

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			/* note: out-pointer is NOT written back on the
			 * error path; EH will re-sync the queues */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1551
Brett Russ05b308e2005-10-05 17:08:53 -04001552/**
1553 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001554 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001555 * @relevant: port error bits relevant to this host controller
1556 * @hc: which host controller we're to look at
1557 *
1558 * Read then write clear the HC interrupt status then walk each
1559 * port connected to the HC and see if it needs servicing. Port
1560 * success ints are reported in the HC interrupt status reg, the
1561 * port error ints are reported in the higher level main
1562 * interrupt status register and thus are passed in via the
1563 * 'relevant' argument.
1564 *
1565 * LOCKING:
1566 * Inherited from caller.
1567 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001568static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001569{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001570 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001571 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001572 u32 hc_irq_cause;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001573 int port, port0;
Brett Russ20f733e2005-09-01 18:26:17 -04001574
Jeff Garzik35177262007-02-24 21:26:42 -05001575 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001576 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001577 else
Brett Russ20f733e2005-09-01 18:26:17 -04001578 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001579
1580 /* we'll need the HC success int register in most cases */
1581 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001582 if (!hc_irq_cause)
1583 return;
1584
1585 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001586
1587 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1588 hc,relevant,hc_irq_cause);
1589
1590 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001591 struct ata_port *ap = host->ports[port];
Mark Lord63af2a52006-03-29 09:50:31 -05001592 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001593 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001594
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001595 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001596 continue;
1597
Brett Russ31961942005-09-30 01:36:00 -04001598 shift = port << 1; /* (port * 2) */
Brett Russ20f733e2005-09-01 18:26:17 -04001599 if (port >= MV_PORTS_PER_HC) {
1600 shift++; /* skip bit 8 in the HC Main IRQ reg */
1601 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001602 have_err_bits = ((PORT0_ERR << shift) & relevant);
1603
1604 if (unlikely(have_err_bits)) {
1605 struct ata_queued_cmd *qc;
1606
1607 qc = ata_qc_from_tag(ap, ap->active_tag);
1608 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1609 continue;
1610
1611 mv_err_intr(ap, qc);
1612 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001613 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001614
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001615 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1616
1617 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1618 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1619 mv_intr_edma(ap);
1620 } else {
1621 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1622 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001623 }
1624 }
1625 VPRINTK("EXIT\n");
1626}
1627
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001628static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1629{
1630 struct ata_port *ap;
1631 struct ata_queued_cmd *qc;
1632 struct ata_eh_info *ehi;
1633 unsigned int i, err_mask, printed = 0;
1634 u32 err_cause;
1635
1636 err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1637
1638 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1639 err_cause);
1640
1641 DPRINTK("All regs @ PCI error\n");
1642 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1643
1644 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1645
1646 for (i = 0; i < host->n_ports; i++) {
1647 ap = host->ports[i];
1648 if (!ata_port_offline(ap)) {
1649 ehi = &ap->eh_info;
1650 ata_ehi_clear_desc(ehi);
1651 if (!printed++)
1652 ata_ehi_push_desc(ehi,
1653 "PCI err cause 0x%08x", err_cause);
1654 err_mask = AC_ERR_HOST_BUS;
1655 ehi->action = ATA_EH_HARDRESET;
1656 qc = ata_qc_from_tag(ap, ap->active_tag);
1657 if (qc)
1658 qc->err_mask |= err_mask;
1659 else
1660 ehi->err_mask |= err_mask;
1661
1662 ata_port_freeze(ap);
1663 }
1664 }
1665}
1666
Brett Russ05b308e2005-10-05 17:08:53 -04001667/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001668 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001669 * @irq: unused
1670 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001671 *
1672 * Read the read only register to determine if any host
1673 * controllers have pending interrupts. If so, call lower level
1674 * routine to handle. Also check for PCI errors which are only
1675 * reported here.
1676 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001677 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001678 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001679 * interrupts.
1680 */
David Howells7d12e782006-10-05 14:55:46 +01001681static irqreturn_t mv_interrupt(int irq, void *dev_instance)
Brett Russ20f733e2005-09-01 18:26:17 -04001682{
Jeff Garzikcca39742006-08-24 03:19:22 -04001683 struct ata_host *host = dev_instance;
Brett Russ20f733e2005-09-01 18:26:17 -04001684 unsigned int hc, handled = 0, n_hcs;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001685 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001686 u32 irq_stat;
1687
Brett Russ20f733e2005-09-01 18:26:17 -04001688 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001689
1690 /* check the cases where we either have nothing pending or have read
1691 * a bogus register value which can indicate HW removal or PCI fault
1692 */
Jeff Garzik35177262007-02-24 21:26:42 -05001693 if (!irq_stat || (0xffffffffU == irq_stat))
Brett Russ20f733e2005-09-01 18:26:17 -04001694 return IRQ_NONE;
Brett Russ20f733e2005-09-01 18:26:17 -04001695
Jeff Garzikcca39742006-08-24 03:19:22 -04001696 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1697 spin_lock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001698
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001699 if (unlikely(irq_stat & PCI_ERR)) {
1700 mv_pci_error(host, mmio);
1701 handled = 1;
1702 goto out_unlock; /* skip all other HC irq handling */
1703 }
1704
Brett Russ20f733e2005-09-01 18:26:17 -04001705 for (hc = 0; hc < n_hcs; hc++) {
1706 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1707 if (relevant) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001708 mv_host_intr(host, relevant, hc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001709 handled = 1;
Brett Russ20f733e2005-09-01 18:26:17 -04001710 }
1711 }
Mark Lord615ab952006-05-19 16:24:56 -04001712
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001713out_unlock:
Jeff Garzikcca39742006-08-24 03:19:22 -04001714 spin_unlock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001715
1716 return IRQ_RETVAL(handled);
1717}
1718
Jeff Garzikc9d39132005-11-13 17:47:51 -05001719static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1720{
1721 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1722 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1723
1724 return hc_mmio + ofs;
1725}
1726
1727static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1728{
1729 unsigned int ofs;
1730
1731 switch (sc_reg_in) {
1732 case SCR_STATUS:
1733 case SCR_ERROR:
1734 case SCR_CONTROL:
1735 ofs = sc_reg_in * sizeof(u32);
1736 break;
1737 default:
1738 ofs = 0xffffffffU;
1739 break;
1740 }
1741 return ofs;
1742}
1743
1744static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1745{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001746 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1747 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001748 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1749
1750 if (ofs != 0xffffffffU)
Tejun Heo0d5ff562007-02-01 15:06:36 +09001751 return readl(addr + ofs);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001752 else
1753 return (u32) ofs;
1754}
1755
1756static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1757{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001758 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1759 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001760 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1761
1762 if (ofs != 0xffffffffU)
Tejun Heo0d5ff562007-02-01 15:06:36 +09001763 writelfl(val, addr + ofs);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001764}
1765
Jeff Garzik522479f2005-11-12 22:14:02 -05001766static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1767{
1768 u8 rev_id;
1769 int early_5080;
1770
1771 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1772
1773 early_5080 = (pdev->device == 0x5080) && (rev_id == 0);
1774
1775 if (!early_5080) {
1776 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1777 tmp |= (1 << 0);
1778 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1779 }
1780
1781 mv_reset_pci_bus(pdev, mmio);
1782}
1783
1784static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1785{
1786 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1787}
1788
Jeff Garzik47c2b672005-11-12 21:13:17 -05001789static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001790 void __iomem *mmio)
1791{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001792 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1793 u32 tmp;
1794
1795 tmp = readl(phy_mmio + MV5_PHY_MODE);
1796
1797 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1798 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001799}
1800
Jeff Garzik47c2b672005-11-12 21:13:17 -05001801static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001802{
Jeff Garzik522479f2005-11-12 22:14:02 -05001803 u32 tmp;
1804
1805 writel(0, mmio + MV_GPIO_PORT_CTL);
1806
1807 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1808
1809 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1810 tmp |= ~(1 << 0);
1811 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001812}
1813
Jeff Garzik2a47ce02005-11-12 23:05:14 -05001814static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1815 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001816{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001817 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1818 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1819 u32 tmp;
1820 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1821
1822 if (fix_apm_sq) {
1823 tmp = readl(phy_mmio + MV5_LT_MODE);
1824 tmp |= (1 << 19);
1825 writel(tmp, phy_mmio + MV5_LT_MODE);
1826
1827 tmp = readl(phy_mmio + MV5_PHY_CTL);
1828 tmp &= ~0x3;
1829 tmp |= 0x1;
1830 writel(tmp, phy_mmio + MV5_PHY_CTL);
1831 }
1832
1833 tmp = readl(phy_mmio + MV5_PHY_MODE);
1834 tmp &= ~mask;
1835 tmp |= hpriv->signal[port].pre;
1836 tmp |= hpriv->signal[port].amps;
1837 writel(tmp, phy_mmio + MV5_PHY_MODE);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001838}
1839
Jeff Garzikc9d39132005-11-13 17:47:51 -05001840
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* mv5_reset_hc_port - reset and reinitialize one 50xx port.
 *
 * Disables eDMA, pulses the channel reset, then zeroes the eDMA
 * queue/interrupt registers and restores the default EDMA config
 * and IORDY timeout values.  Register write order follows the
 * original sequence (note respq outp is written before inp).
 *
 * LOCKING:
 *	Inherited from caller.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1867
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* mv5_reset_one_hc - reinitialize one 50xx host-controller block.
 *
 * Zeroes four HC registers (offsets 0x00c..0x018; names not defined
 * in this file) and rewrites the register at offset 0x20, clearing
 * some bit groups and setting others.
 *
 * LOCKING:
 *	Inherited from caller.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1886
1887static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1888 unsigned int n_hc)
1889{
1890 unsigned int hc, port;
1891
1892 for (hc = 0; hc < n_hc; hc++) {
1893 for (port = 0; port < MV_PORTS_PER_HC; port++)
1894 mv5_reset_hc_port(hpriv, mmio,
1895 (hc * MV_PORTS_PER_HC) + port);
1896
1897 mv5_reset_one_hc(hpriv, mmio, hc);
1898 }
1899
1900 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001901}
1902
Jeff Garzik101ffae2005-11-12 22:17:49 -05001903#undef ZERO
1904#define ZERO(reg) writel(0, mmio + (reg))
1905static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1906{
1907 u32 tmp;
1908
1909 tmp = readl(mmio + MV_PCI_MODE);
1910 tmp &= 0xff00ffff;
1911 writel(tmp, mmio + MV_PCI_MODE);
1912
1913 ZERO(MV_PCI_DISC_TIMER);
1914 ZERO(MV_PCI_MSI_TRIGGER);
1915 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1916 ZERO(HC_MAIN_IRQ_MASK_OFS);
1917 ZERO(MV_PCI_SERR_MASK);
1918 ZERO(PCI_IRQ_CAUSE_OFS);
1919 ZERO(PCI_IRQ_MASK_OFS);
1920 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1921 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1922 ZERO(MV_PCI_ERR_ATTRIBUTE);
1923 ZERO(MV_PCI_ERR_COMMAND);
1924}
1925#undef ZERO
1926
1927static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1928{
1929 u32 tmp;
1930
1931 mv5_reset_flash(hpriv, mmio);
1932
1933 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1934 tmp &= 0x3;
1935 tmp |= (1 << 5) | (1 << 6);
1936 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1937}
1938
1939/**
1940 * mv6_reset_hc - Perform the 6xxx global soft reset
1941 * @mmio: base address of the HBA
1942 *
1943 * This routine only applies to 6xxx parts.
1944 *
1945 * LOCKING:
1946 * Inherited from caller.
1947 */
Jeff Garzikc9d39132005-11-13 17:47:51 -05001948static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1949 unsigned int n_hc)
Jeff Garzik101ffae2005-11-12 22:17:49 -05001950{
1951 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1952 int i, rc = 0;
1953 u32 t;
1954
1955 /* Following procedure defined in PCI "main command and status
1956 * register" table.
1957 */
1958 t = readl(reg);
1959 writel(t | STOP_PCI_MASTER, reg);
1960
1961 for (i = 0; i < 1000; i++) {
1962 udelay(1);
1963 t = readl(reg);
1964 if (PCI_MASTER_EMPTY & t) {
1965 break;
1966 }
1967 }
1968 if (!(PCI_MASTER_EMPTY & t)) {
1969 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1970 rc = 1;
1971 goto done;
1972 }
1973
1974 /* set reset */
1975 i = 5;
1976 do {
1977 writel(t | GLOB_SFT_RST, reg);
1978 t = readl(reg);
1979 udelay(1);
1980 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
1981
1982 if (!(GLOB_SFT_RST & t)) {
1983 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
1984 rc = 1;
1985 goto done;
1986 }
1987
1988 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
1989 i = 5;
1990 do {
1991 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
1992 t = readl(reg);
1993 udelay(1);
1994 } while ((GLOB_SFT_RST & t) && (i-- > 0));
1995
1996 if (GLOB_SFT_RST & t) {
1997 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
1998 rc = 1;
1999 }
2000done:
2001 return rc;
2002}
2003
Jeff Garzik47c2b672005-11-12 21:13:17 -05002004static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002005 void __iomem *mmio)
2006{
2007 void __iomem *port_mmio;
2008 u32 tmp;
2009
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002010 tmp = readl(mmio + MV_RESET_CFG);
2011 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002012 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002013 hpriv->signal[idx].pre = 0x1 << 5;
2014 return;
2015 }
2016
2017 port_mmio = mv_port_base(mmio, idx);
2018 tmp = readl(port_mmio + PHY_MODE2);
2019
2020 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2021 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2022}
2023
Jeff Garzik47c2b672005-11-12 21:13:17 -05002024static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002025{
Jeff Garzik47c2b672005-11-12 21:13:17 -05002026 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002027}
2028
Jeff Garzikc9d39132005-11-13 17:47:51 -05002029static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002030 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002031{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002032 void __iomem *port_mmio = mv_port_base(mmio, port);
2033
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002034 u32 hp_flags = hpriv->hp_flags;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002035 int fix_phy_mode2 =
2036 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002037 int fix_phy_mode4 =
Jeff Garzik47c2b672005-11-12 21:13:17 -05002038 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2039 u32 m2, tmp;
2040
2041 if (fix_phy_mode2) {
2042 m2 = readl(port_mmio + PHY_MODE2);
2043 m2 &= ~(1 << 16);
2044 m2 |= (1 << 31);
2045 writel(m2, port_mmio + PHY_MODE2);
2046
2047 udelay(200);
2048
2049 m2 = readl(port_mmio + PHY_MODE2);
2050 m2 &= ~((1 << 16) | (1 << 31));
2051 writel(m2, port_mmio + PHY_MODE2);
2052
2053 udelay(200);
2054 }
2055
2056 /* who knows what this magic does */
2057 tmp = readl(port_mmio + PHY_MODE3);
2058 tmp &= ~0x7F800000;
2059 tmp |= 0x2A800000;
2060 writel(tmp, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002061
2062 if (fix_phy_mode4) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002063 u32 m4;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002064
2065 m4 = readl(port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002066
2067 if (hp_flags & MV_HP_ERRATA_60X1B2)
2068 tmp = readl(port_mmio + 0x310);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002069
2070 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2071
2072 writel(m4, port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002073
2074 if (hp_flags & MV_HP_ERRATA_60X1B2)
2075 writel(tmp, port_mmio + 0x310);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002076 }
2077
2078 /* Revert values of pre-emphasis and signal amps to the saved ones */
2079 m2 = readl(port_mmio + PHY_MODE2);
2080
2081 m2 &= ~MV_M2_PREAMP_MASK;
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002082 m2 |= hpriv->signal[port].amps;
2083 m2 |= hpriv->signal[port].pre;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002084 m2 &= ~(1 << 16);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002085
Jeff Garzike4e7b892006-01-31 12:18:41 -05002086 /* according to mvSata 3.6.1, some IIE values are fixed */
2087 if (IS_GEN_IIE(hpriv)) {
2088 m2 &= ~0xC30FF01F;
2089 m2 |= 0x0000900F;
2090 }
2091
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002092 writel(m2, port_mmio + PHY_MODE2);
2093}
2094
Jeff Garzikc9d39132005-11-13 17:47:51 -05002095static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2096 unsigned int port_no)
Brett Russ20f733e2005-09-01 18:26:17 -04002097{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002098 void __iomem *port_mmio = mv_port_base(mmio, port_no);
Brett Russ20f733e2005-09-01 18:26:17 -04002099
Brett Russ31961942005-09-30 01:36:00 -04002100 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002101
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002102 if (IS_GEN_II(hpriv)) {
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002103 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
Mark Lordeb46d682006-05-19 16:29:21 -04002104 ifctl |= (1 << 7); /* enable gen2i speed */
2105 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002106 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2107 }
2108
Brett Russ20f733e2005-09-01 18:26:17 -04002109 udelay(25); /* allow reset propagation */
2110
2111 /* Spec never mentions clearing the bit. Marvell's driver does
2112 * clear the bit, however.
2113 */
Brett Russ31961942005-09-30 01:36:00 -04002114 writelfl(0, port_mmio + EDMA_CMD_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002115
Jeff Garzikc9d39132005-11-13 17:47:51 -05002116 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2117
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002118 if (IS_GEN_I(hpriv))
Jeff Garzikc9d39132005-11-13 17:47:51 -05002119 mdelay(1);
2120}
2121
Jeff Garzikc9d39132005-11-13 17:47:51 -05002122/**
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002123 * mv_phy_reset - Perform eDMA reset followed by COMRESET
Jeff Garzikc9d39132005-11-13 17:47:51 -05002124 * @ap: ATA channel to manipulate
2125 *
2126 * Part of this is taken from __sata_phy_reset and modified to
2127 * not sleep since this routine gets called from interrupt level.
2128 *
2129 * LOCKING:
2130 * Inherited from caller. This is coded to safe to call at
2131 * interrupt level, i.e. it does not sleep.
2132 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002133static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2134 unsigned long deadline)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002135{
2136 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikcca39742006-08-24 03:19:22 -04002137 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002138 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzik22374672005-11-17 10:59:48 -05002139 int retry = 5;
2140 u32 sstatus;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002141
2142 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002143
Jeff Garzik095fec82005-11-12 09:50:49 -05002144 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
Brett Russ31961942005-09-30 01:36:00 -04002145 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
2146 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
Brett Russ20f733e2005-09-01 18:26:17 -04002147
Jeff Garzik22374672005-11-17 10:59:48 -05002148 /* Issue COMRESET via SControl */
2149comreset_retry:
Tejun Heo81952c52006-05-15 20:57:47 +09002150 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002151 msleep(1);
Jeff Garzik22374672005-11-17 10:59:48 -05002152
Tejun Heo81952c52006-05-15 20:57:47 +09002153 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002154 msleep(20);
Jeff Garzik22374672005-11-17 10:59:48 -05002155
Brett Russ31961942005-09-30 01:36:00 -04002156 do {
Tejun Heo81952c52006-05-15 20:57:47 +09002157 sata_scr_read(ap, SCR_STATUS, &sstatus);
Andres Salomon62f1d0e2006-09-11 08:51:05 -04002158 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
Brett Russ31961942005-09-30 01:36:00 -04002159 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002160
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002161 msleep(1);
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002162 } while (time_before(jiffies, deadline));
Brett Russ20f733e2005-09-01 18:26:17 -04002163
Jeff Garzik22374672005-11-17 10:59:48 -05002164 /* work around errata */
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002165 if (IS_GEN_II(hpriv) &&
Jeff Garzik22374672005-11-17 10:59:48 -05002166 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2167 (retry-- > 0))
2168 goto comreset_retry;
Jeff Garzik095fec82005-11-12 09:50:49 -05002169
2170 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
Brett Russ31961942005-09-30 01:36:00 -04002171 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
2172 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
2173
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002174 if (ata_port_offline(ap)) {
2175 *class = ATA_DEV_NONE;
Brett Russ20f733e2005-09-01 18:26:17 -04002176 return;
2177 }
2178
Jeff Garzik22374672005-11-17 10:59:48 -05002179 /* even after SStatus reflects that device is ready,
2180 * it seems to take a while for link to be fully
2181 * established (and thus Status no longer 0x80/0x7F),
2182 * so we poll a bit for that, here.
2183 */
2184 retry = 20;
2185 while (1) {
2186 u8 drv_stat = ata_check_status(ap);
2187 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2188 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002189 msleep(500);
Jeff Garzik22374672005-11-17 10:59:48 -05002190 if (retry-- <= 0)
2191 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002192 if (time_after(jiffies, deadline))
2193 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002194 }
2195
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002196 /* FIXME: if we passed the deadline, the following
2197 * code probably produces an invalid result
2198 */
Brett Russ20f733e2005-09-01 18:26:17 -04002199
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002200 /* finally, read device signature from TF registers */
2201 *class = ata_dev_try_classify(ap, 0, NULL);
Jeff Garzik095fec82005-11-12 09:50:49 -05002202
2203 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2204
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002205 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
Jeff Garzik095fec82005-11-12 09:50:49 -05002206
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002207 VPRINTK("EXIT\n");
Brett Russ20f733e2005-09-01 18:26:17 -04002208}
2209
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002210static int mv_prereset(struct ata_port *ap, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002211{
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002212 struct mv_port_priv *pp = ap->private_data;
2213 struct ata_eh_context *ehc = &ap->eh_context;
2214 int rc;
2215
2216 rc = mv_stop_dma(ap);
2217 if (rc)
2218 ehc->i.action |= ATA_EH_HARDRESET;
2219
2220 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2221 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2222 ehc->i.action |= ATA_EH_HARDRESET;
2223 }
2224
2225 /* if we're about to do hardreset, nothing more to do */
2226 if (ehc->i.action & ATA_EH_HARDRESET)
2227 return 0;
2228
2229 if (ata_port_online(ap))
2230 rc = ata_wait_ready(ap, deadline);
2231 else
2232 rc = -ENODEV;
2233
2234 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002235}
2236
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002237static int mv_hardreset(struct ata_port *ap, unsigned int *class,
2238 unsigned long deadline)
2239{
2240 struct mv_host_priv *hpriv = ap->host->private_data;
2241 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2242
2243 mv_stop_dma(ap);
2244
2245 mv_channel_reset(hpriv, mmio, ap->port_no);
2246
2247 mv_phy_reset(ap, class, deadline);
2248
2249 return 0;
2250}
2251
2252static void mv_postreset(struct ata_port *ap, unsigned int *classes)
2253{
2254 u32 serr;
2255
2256 /* print link status */
2257 sata_print_link_status(ap);
2258
2259 /* clear SError */
2260 sata_scr_read(ap, SCR_ERROR, &serr);
2261 sata_scr_write_flush(ap, SCR_ERROR, serr);
2262
2263 /* bail out if no device is present */
2264 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2265 DPRINTK("EXIT, no device\n");
2266 return;
2267 }
2268
2269 /* set up device control */
2270 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2271}
2272
2273static void mv_error_handler(struct ata_port *ap)
2274{
2275 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2276 mv_hardreset, mv_postreset);
2277}
2278
2279static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2280{
2281 mv_stop_dma(qc->ap);
2282}
2283
2284static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002285{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002286 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002287 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2288 u32 tmp, mask;
2289 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002290
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002291 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002292
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002293 shift = ap->port_no * 2;
2294 if (hc > 0)
2295 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002296
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002297 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002298
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002299 /* disable assertion of portN err, done events */
2300 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2301 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2302}
2303
2304static void mv_eh_thaw(struct ata_port *ap)
2305{
2306 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2307 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2308 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2309 void __iomem *port_mmio = mv_ap_base(ap);
2310 u32 tmp, mask, hc_irq_cause;
2311 unsigned int shift, hc_port_no = ap->port_no;
2312
2313 /* FIXME: handle coalescing completion events properly */
2314
2315 shift = ap->port_no * 2;
2316 if (hc > 0) {
2317 shift++;
2318 hc_port_no -= 4;
Mark Lord9b358e32006-05-19 16:21:03 -04002319 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002320
2321 mask = 0x3 << shift;
2322
2323 /* clear EDMA errors on this port */
2324 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2325
2326 /* clear pending irq events */
2327 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2328 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2329 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2330 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2331
2332 /* enable assertion of portN err, done events */
2333 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2334 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
Brett Russ31961942005-09-30 01:36:00 -04002335}
2336
Brett Russ05b308e2005-10-05 17:08:53 -04002337/**
2338 * mv_port_init - Perform some early initialization on a single port.
2339 * @port: libata data structure storing shadow register addresses
2340 * @port_mmio: base address of the port
2341 *
2342 * Initialize shadow register mmio addresses, clear outstanding
2343 * interrupts on the port, and unmask interrupts for the future
2344 * start of the port.
2345 *
2346 * LOCKING:
2347 * Inherited from caller.
2348 */
Brett Russ31961942005-09-30 01:36:00 -04002349static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2350{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002351 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04002352 unsigned serr_ofs;
2353
Jeff Garzik8b260242005-11-12 12:32:50 -05002354 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04002355 */
2356 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05002357 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04002358 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2359 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2360 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2361 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2362 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2363 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05002364 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04002365 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2366 /* special case: control/altstatus doesn't have ATA_REG_ address */
2367 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2368
2369 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08002370 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04002371
Brett Russ31961942005-09-30 01:36:00 -04002372 /* Clear any currently outstanding port interrupt conditions */
2373 serr_ofs = mv_scr_offset(SCR_ERROR);
2374 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2375 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2376
Brett Russ20f733e2005-09-01 18:26:17 -04002377 /* unmask all EDMA error interrupts */
Brett Russ31961942005-09-30 01:36:00 -04002378 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002379
Jeff Garzik8b260242005-11-12 12:32:50 -05002380 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04002381 readl(port_mmio + EDMA_CFG_OFS),
2382 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2383 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e2005-09-01 18:26:17 -04002384}
2385
Tejun Heo4447d352007-04-17 23:44:08 +09002386static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002387{
Tejun Heo4447d352007-04-17 23:44:08 +09002388 struct pci_dev *pdev = to_pci_dev(host->dev);
2389 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002390 u8 rev_id;
2391 u32 hp_flags = hpriv->hp_flags;
2392
2393 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2394
2395 switch(board_idx) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002396 case chip_5080:
2397 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002398 hp_flags |= MV_HP_GEN_I;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002399
Jeff Garzik47c2b672005-11-12 21:13:17 -05002400 switch (rev_id) {
2401 case 0x1:
2402 hp_flags |= MV_HP_ERRATA_50XXB0;
2403 break;
2404 case 0x3:
2405 hp_flags |= MV_HP_ERRATA_50XXB2;
2406 break;
2407 default:
2408 dev_printk(KERN_WARNING, &pdev->dev,
2409 "Applying 50XXB2 workarounds to unknown rev\n");
2410 hp_flags |= MV_HP_ERRATA_50XXB2;
2411 break;
2412 }
2413 break;
2414
2415 case chip_504x:
2416 case chip_508x:
2417 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002418 hp_flags |= MV_HP_GEN_I;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002419
2420 switch (rev_id) {
2421 case 0x0:
2422 hp_flags |= MV_HP_ERRATA_50XXB0;
2423 break;
2424 case 0x3:
2425 hp_flags |= MV_HP_ERRATA_50XXB2;
2426 break;
2427 default:
2428 dev_printk(KERN_WARNING, &pdev->dev,
2429 "Applying B2 workarounds to unknown rev\n");
2430 hp_flags |= MV_HP_ERRATA_50XXB2;
2431 break;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002432 }
2433 break;
2434
2435 case chip_604x:
2436 case chip_608x:
Jeff Garzik47c2b672005-11-12 21:13:17 -05002437 hpriv->ops = &mv6xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002438 hp_flags |= MV_HP_GEN_II;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002439
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002440 switch (rev_id) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002441 case 0x7:
2442 hp_flags |= MV_HP_ERRATA_60X1B2;
2443 break;
2444 case 0x9:
2445 hp_flags |= MV_HP_ERRATA_60X1C0;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002446 break;
2447 default:
2448 dev_printk(KERN_WARNING, &pdev->dev,
Jeff Garzik47c2b672005-11-12 21:13:17 -05002449 "Applying B2 workarounds to unknown rev\n");
2450 hp_flags |= MV_HP_ERRATA_60X1B2;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002451 break;
2452 }
2453 break;
2454
Jeff Garzike4e7b892006-01-31 12:18:41 -05002455 case chip_7042:
2456 case chip_6042:
2457 hpriv->ops = &mv6xxx_ops;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002458 hp_flags |= MV_HP_GEN_IIE;
2459
2460 switch (rev_id) {
2461 case 0x0:
2462 hp_flags |= MV_HP_ERRATA_XX42A0;
2463 break;
2464 case 0x1:
2465 hp_flags |= MV_HP_ERRATA_60X1C0;
2466 break;
2467 default:
2468 dev_printk(KERN_WARNING, &pdev->dev,
2469 "Applying 60X1C0 workarounds to unknown rev\n");
2470 hp_flags |= MV_HP_ERRATA_60X1C0;
2471 break;
2472 }
2473 break;
2474
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002475 default:
2476 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2477 return 1;
2478 }
2479
2480 hpriv->hp_flags = hp_flags;
2481
2482 return 0;
2483}
2484
Brett Russ05b308e2005-10-05 17:08:53 -04002485/**
Jeff Garzik47c2b672005-11-12 21:13:17 -05002486 * mv_init_host - Perform some early initialization of the host.
Tejun Heo4447d352007-04-17 23:44:08 +09002487 * @host: ATA host to initialize
2488 * @board_idx: controller index
Brett Russ05b308e2005-10-05 17:08:53 -04002489 *
2490 * If possible, do an early global reset of the host. Then do
2491 * our port init and clear/unmask all/relevant host interrupts.
2492 *
2493 * LOCKING:
2494 * Inherited from caller.
2495 */
Tejun Heo4447d352007-04-17 23:44:08 +09002496static int mv_init_host(struct ata_host *host, unsigned int board_idx)
Brett Russ20f733e2005-09-01 18:26:17 -04002497{
2498 int rc = 0, n_hc, port, hc;
Tejun Heo4447d352007-04-17 23:44:08 +09002499 struct pci_dev *pdev = to_pci_dev(host->dev);
2500 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2501 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002502
Jeff Garzik47c2b672005-11-12 21:13:17 -05002503 /* global interrupt mask */
2504 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2505
Tejun Heo4447d352007-04-17 23:44:08 +09002506 rc = mv_chip_id(host, board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002507 if (rc)
2508 goto done;
2509
Tejun Heo4447d352007-04-17 23:44:08 +09002510 n_hc = mv_get_hc_count(host->ports[0]->flags);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002511
Tejun Heo4447d352007-04-17 23:44:08 +09002512 for (port = 0; port < host->n_ports; port++)
Jeff Garzik47c2b672005-11-12 21:13:17 -05002513 hpriv->ops->read_preamp(hpriv, port, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04002514
Jeff Garzikc9d39132005-11-13 17:47:51 -05002515 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002516 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04002517 goto done;
Brett Russ20f733e2005-09-01 18:26:17 -04002518
Jeff Garzik522479f2005-11-12 22:14:02 -05002519 hpriv->ops->reset_flash(hpriv, mmio);
2520 hpriv->ops->reset_bus(pdev, mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002521 hpriv->ops->enable_leds(hpriv, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04002522
Tejun Heo4447d352007-04-17 23:44:08 +09002523 for (port = 0; port < host->n_ports; port++) {
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002524 if (IS_GEN_II(hpriv)) {
Jeff Garzikc9d39132005-11-13 17:47:51 -05002525 void __iomem *port_mmio = mv_port_base(mmio, port);
2526
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002527 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
Mark Lordeb46d682006-05-19 16:29:21 -04002528 ifctl |= (1 << 7); /* enable gen2i speed */
2529 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002530 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2531 }
2532
Jeff Garzikc9d39132005-11-13 17:47:51 -05002533 hpriv->ops->phy_errata(hpriv, mmio, port);
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002534 }
2535
Tejun Heo4447d352007-04-17 23:44:08 +09002536 for (port = 0; port < host->n_ports; port++) {
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002537 void __iomem *port_mmio = mv_port_base(mmio, port);
Tejun Heo4447d352007-04-17 23:44:08 +09002538 mv_port_init(&host->ports[port]->ioaddr, port_mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04002539 }
2540
2541 for (hc = 0; hc < n_hc; hc++) {
Brett Russ31961942005-09-30 01:36:00 -04002542 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2543
2544 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2545 "(before clear)=0x%08x\n", hc,
2546 readl(hc_mmio + HC_CFG_OFS),
2547 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2548
2549 /* Clear any currently outstanding hc interrupt conditions */
2550 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002551 }
2552
Brett Russ31961942005-09-30 01:36:00 -04002553 /* Clear any currently outstanding host interrupt conditions */
2554 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2555
2556 /* and unmask interrupt generation for host regs */
2557 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
Jeff Garzikfb621e22007-02-25 04:19:45 -05002558
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002559 if (IS_GEN_I(hpriv))
Jeff Garzikfb621e22007-02-25 04:19:45 -05002560 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2561 else
2562 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002563
2564 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
Jeff Garzik8b260242005-11-12 12:32:50 -05002565 "PCI int cause/mask=0x%08x/0x%08x\n",
Brett Russ20f733e2005-09-01 18:26:17 -04002566 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2567 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2568 readl(mmio + PCI_IRQ_CAUSE_OFS),
2569 readl(mmio + PCI_IRQ_MASK_OFS));
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002570
Brett Russ31961942005-09-30 01:36:00 -04002571done:
Brett Russ20f733e2005-09-01 18:26:17 -04002572 return rc;
2573}
2574
Brett Russ05b308e2005-10-05 17:08:53 -04002575/**
2576 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002577 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002578 *
2579 * FIXME: complete this.
2580 *
2581 * LOCKING:
2582 * Inherited from caller.
2583 */
Tejun Heo4447d352007-04-17 23:44:08 +09002584static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002585{
Tejun Heo4447d352007-04-17 23:44:08 +09002586 struct pci_dev *pdev = to_pci_dev(host->dev);
2587 struct mv_host_priv *hpriv = host->private_data;
Brett Russ31961942005-09-30 01:36:00 -04002588 u8 rev_id, scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002589 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002590
2591 /* Use this to determine the HW stepping of the chip so we know
2592 * what errata to workaround
2593 */
2594 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2595
2596 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2597 if (scc == 0)
2598 scc_s = "SCSI";
2599 else if (scc == 0x01)
2600 scc_s = "RAID";
2601 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002602 scc_s = "?";
2603
2604 if (IS_GEN_I(hpriv))
2605 gen = "I";
2606 else if (IS_GEN_II(hpriv))
2607 gen = "II";
2608 else if (IS_GEN_IIE(hpriv))
2609 gen = "IIE";
2610 else
2611 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002612
Jeff Garzika9524a72005-10-30 14:39:11 -05002613 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002614 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2615 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002616 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2617}
2618
Brett Russ05b308e2005-10-05 17:08:53 -04002619/**
2620 * mv_init_one - handle a positive probe of a Marvell host
2621 * @pdev: PCI device found
2622 * @ent: PCI device ID entry for the matched host
2623 *
2624 * LOCKING:
2625 * Inherited from caller.
2626 */
Brett Russ20f733e2005-09-01 18:26:17 -04002627static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2628{
2629 static int printed_version = 0;
Brett Russ20f733e2005-09-01 18:26:17 -04002630 unsigned int board_idx = (unsigned int)ent->driver_data;
Tejun Heo4447d352007-04-17 23:44:08 +09002631 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2632 struct ata_host *host;
2633 struct mv_host_priv *hpriv;
2634 int n_ports, rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002635
Jeff Garzika9524a72005-10-30 14:39:11 -05002636 if (!printed_version++)
2637 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
Brett Russ20f733e2005-09-01 18:26:17 -04002638
Tejun Heo4447d352007-04-17 23:44:08 +09002639 /* allocate host */
2640 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2641
2642 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2643 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2644 if (!host || !hpriv)
2645 return -ENOMEM;
2646 host->private_data = hpriv;
2647
2648 /* acquire resources */
Tejun Heo24dc5f32007-01-20 16:00:28 +09002649 rc = pcim_enable_device(pdev);
2650 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04002651 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002652
Tejun Heo0d5ff562007-02-01 15:06:36 +09002653 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2654 if (rc == -EBUSY)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002655 pcim_pin_device(pdev);
Tejun Heo0d5ff562007-02-01 15:06:36 +09002656 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002657 return rc;
Tejun Heo4447d352007-04-17 23:44:08 +09002658 host->iomap = pcim_iomap_table(pdev);
Brett Russ20f733e2005-09-01 18:26:17 -04002659
Jeff Garzikd88184f2007-02-26 01:26:06 -05002660 rc = pci_go_64(pdev);
2661 if (rc)
2662 return rc;
2663
Brett Russ20f733e2005-09-01 18:26:17 -04002664 /* initialize adapter */
Tejun Heo4447d352007-04-17 23:44:08 +09002665 rc = mv_init_host(host, board_idx);
Tejun Heo24dc5f32007-01-20 16:00:28 +09002666 if (rc)
2667 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002668
Brett Russ31961942005-09-30 01:36:00 -04002669 /* Enable interrupts */
Tejun Heo6a59dcf2007-02-24 15:12:31 +09002670 if (msi && pci_enable_msi(pdev))
Brett Russ31961942005-09-30 01:36:00 -04002671 pci_intx(pdev, 1);
Brett Russ20f733e2005-09-01 18:26:17 -04002672
Brett Russ31961942005-09-30 01:36:00 -04002673 mv_dump_pci_cfg(pdev, 0x68);
Tejun Heo4447d352007-04-17 23:44:08 +09002674 mv_print_info(host);
Brett Russ20f733e2005-09-01 18:26:17 -04002675
Tejun Heo4447d352007-04-17 23:44:08 +09002676 pci_set_master(pdev);
Jeff Garzik4537deb2007-07-12 14:30:19 -04002677 pci_set_mwi(pdev);
Tejun Heo4447d352007-04-17 23:44:08 +09002678 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002679 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
Brett Russ20f733e2005-09-01 18:26:17 -04002680}
2681
2682static int __init mv_init(void)
2683{
Pavel Roskinb7887192006-08-10 18:13:18 +09002684 return pci_register_driver(&mv_pci_driver);
Brett Russ20f733e2005-09-01 18:26:17 -04002685}
2686
2687static void __exit mv_exit(void)
2688{
2689 pci_unregister_driver(&mv_pci_driver);
2690}
2691
2692MODULE_AUTHOR("Brett Russ");
2693MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2694MODULE_LICENSE("GPL");
2695MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2696MODULE_VERSION(DRV_VERSION);
2697
Jeff Garzikddef9bb2006-02-02 16:17:06 -05002698module_param(msi, int, 0444);
2699MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2700
Brett Russ20f733e2005-09-01 18:26:17 -04002701module_init(mv_init);
2702module_exit(mv_exit);