blob: 1a82e22b3efda91ba02e927031341cc73ca36011 [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
23
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is quite often not
  worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/
59
60
Brett Russ20f733e2005-09-01 18:26:17 -040061#include <linux/kernel.h>
62#include <linux/module.h>
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/blkdev.h>
66#include <linux/delay.h>
67#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050069#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040070#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050071#include <scsi/scsi_cmnd.h>
Brett Russ20f733e2005-09-01 18:26:17 -040072#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073
74#define DRV_NAME "sata_mv"
Jeff Garzik2a3103c2007-08-31 04:54:06 -040075#define DRV_VERSION "1.0"
Brett Russ20f733e2005-09-01 18:26:17 -040076
77enum {
78 /* BAR's are enumerated in terms of pci_resource_start() terms */
79 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
80 MV_IO_BAR = 2, /* offset 0x18: IO space */
81 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
82
83 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
84 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
85
86 MV_PCI_REG_BASE = 0,
87 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040088 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
89 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
90 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
91 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
92 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
93
Brett Russ20f733e2005-09-01 18:26:17 -040094 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -050095 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -050096 MV_GPIO_PORT_CTL = 0x104f0,
97 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -040098
99 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
100 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
102 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
103
Brett Russ31961942005-09-30 01:36:00 -0400104 MV_MAX_Q_DEPTH = 32,
105 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
106
107 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
108 * CRPB needs alignment on a 256B boundary. Size == 256B
109 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
110 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
111 */
112 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
113 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
114 MV_MAX_SG_CT = 176,
115 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
116 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
117
Brett Russ20f733e2005-09-01 18:26:17 -0400118 MV_PORTS_PER_HC = 4,
119 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
120 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400121 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400122 MV_PORT_MASK = 3,
123
124 /* Host Flags */
125 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
126 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400127 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400128 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
129 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500130 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400131
Brett Russ31961942005-09-30 01:36:00 -0400132 CRQB_FLAG_READ = (1 << 0),
133 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400134 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
135 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400136 CRQB_CMD_ADDR_SHIFT = 8,
137 CRQB_CMD_CS = (0x2 << 11),
138 CRQB_CMD_LAST = (1 << 15),
139
140 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400141 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
142 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400143
144 EPRD_FLAG_END_OF_TBL = (1 << 31),
145
Brett Russ20f733e2005-09-01 18:26:17 -0400146 /* PCI interface registers */
147
Brett Russ31961942005-09-30 01:36:00 -0400148 PCI_COMMAND_OFS = 0xc00,
149
Brett Russ20f733e2005-09-01 18:26:17 -0400150 PCI_MAIN_CMD_STS_OFS = 0xd30,
151 STOP_PCI_MASTER = (1 << 2),
152 PCI_MASTER_EMPTY = (1 << 3),
153 GLOB_SFT_RST = (1 << 4),
154
Jeff Garzik522479f2005-11-12 22:14:02 -0500155 MV_PCI_MODE = 0xd00,
156 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
157 MV_PCI_DISC_TIMER = 0xd04,
158 MV_PCI_MSI_TRIGGER = 0xc38,
159 MV_PCI_SERR_MASK = 0xc28,
160 MV_PCI_XBAR_TMOUT = 0x1d04,
161 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
162 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
163 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
164 MV_PCI_ERR_COMMAND = 0x1d50,
165
166 PCI_IRQ_CAUSE_OFS = 0x1d58,
167 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400168 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
169
170 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
171 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
172 PORT0_ERR = (1 << 0), /* shift by port # */
173 PORT0_DONE = (1 << 1), /* shift by port # */
174 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
175 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
176 PCI_ERR = (1 << 18),
177 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
178 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500179 PORTS_0_3_COAL_DONE = (1 << 8),
180 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400181 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
182 GPIO_INT = (1 << 22),
183 SELF_INT = (1 << 23),
184 TWSI_INT = (1 << 24),
185 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500186 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500187 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400188 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
189 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500190 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
191 HC_MAIN_RSVD_5),
Brett Russ20f733e2005-09-01 18:26:17 -0400192
193 /* SATAHC registers */
194 HC_CFG_OFS = 0,
195
196 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400197 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400198 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
199 DEV_IRQ = (1 << 8), /* shift by port # */
200
201 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400202 SHD_BLK_OFS = 0x100,
203 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400204
205 /* SATA registers */
206 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
207 SATA_ACTIVE_OFS = 0x350,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500208 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500209 PHY_MODE4 = 0x314,
210 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500211 MV5_PHY_MODE = 0x74,
212 MV5_LT_MODE = 0x30,
213 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500214 SATA_INTERFACE_CTL = 0x050,
215
216 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400217
218 /* Port registers */
219 EDMA_CFG_OFS = 0,
Brett Russ31961942005-09-30 01:36:00 -0400220 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
221 EDMA_CFG_NCQ = (1 << 5),
222 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
223 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
224 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400225
226 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
227 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400228 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
229 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
230 EDMA_ERR_DEV = (1 << 2), /* device error */
231 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
232 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
233 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400234 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
235 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400236 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400237 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400238 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
239 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
240 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
241 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
242 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Brett Russ20f733e2005-09-01 18:26:17 -0400243 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400244 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
245 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
246 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
247 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400248 EDMA_ERR_OVERRUN_5 = (1 << 5),
249 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400250 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
251 EDMA_ERR_PRD_PAR |
252 EDMA_ERR_DEV_DCON |
253 EDMA_ERR_DEV_CON |
254 EDMA_ERR_SERR |
255 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400256 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400257 EDMA_ERR_CRPB_PAR |
258 EDMA_ERR_INTRL_PAR |
259 EDMA_ERR_IORDY |
260 EDMA_ERR_LNK_CTRL_RX_2 |
261 EDMA_ERR_LNK_DATA_RX |
262 EDMA_ERR_LNK_DATA_TX |
263 EDMA_ERR_TRANS_PROTO,
264 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
265 EDMA_ERR_PRD_PAR |
266 EDMA_ERR_DEV_DCON |
267 EDMA_ERR_DEV_CON |
268 EDMA_ERR_OVERRUN_5 |
269 EDMA_ERR_UNDERRUN_5 |
270 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400271 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400272 EDMA_ERR_CRPB_PAR |
273 EDMA_ERR_INTRL_PAR |
274 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400275
Brett Russ31961942005-09-30 01:36:00 -0400276 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
277 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400278
279 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
280 EDMA_REQ_Q_PTR_SHIFT = 5,
281
282 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
283 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
284 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400285 EDMA_RSP_Q_PTR_SHIFT = 3,
286
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400287 EDMA_CMD_OFS = 0x28, /* EDMA command register */
288 EDMA_EN = (1 << 0), /* enable EDMA */
289 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
290 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400291
Jeff Garzikc9d39132005-11-13 17:47:51 -0500292 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500293 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500294
Brett Russ31961942005-09-30 01:36:00 -0400295 /* Host private flags (hp_flags) */
296 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500297 MV_HP_ERRATA_50XXB0 = (1 << 1),
298 MV_HP_ERRATA_50XXB2 = (1 << 2),
299 MV_HP_ERRATA_60X1B2 = (1 << 3),
300 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500301 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400302 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
303 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
304 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400305
Brett Russ31961942005-09-30 01:36:00 -0400306 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400307 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
308 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400309};
310
/* Controller-generation tests on the host-private flag word. */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500314
enum {
	MV_DMA_BOUNDARY		= 0xffffffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
326
/* Index into mv_port_info[], selected from the PCI device table. */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
336
Brett Russ31961942005-09-30 01:36:00 -0400337/* Command ReQuest Block: 32B */
338struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400339 __le32 sg_addr;
340 __le32 sg_addr_hi;
341 __le16 ctrl_flags;
342 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400343};
344
Jeff Garzike4e7b892006-01-31 12:18:41 -0500345struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400346 __le32 addr;
347 __le32 addr_hi;
348 __le32 flags;
349 __le32 len;
350 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500351};
352
Brett Russ31961942005-09-30 01:36:00 -0400353/* Command ResPonse Block: 8B */
354struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400355 __le16 id;
356 __le16 flags;
357 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400358};
359
360/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
361struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400362 __le32 addr;
363 __le32 flags_size;
364 __le32 addr_hi;
365 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400366};
367
368struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400369 struct mv_crqb *crqb;
370 dma_addr_t crqb_dma;
371 struct mv_crpb *crpb;
372 dma_addr_t crpb_dma;
373 struct mv_sg *sg_tbl;
374 dma_addr_t sg_tbl_dma;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400375
376 unsigned int req_idx;
377 unsigned int resp_idx;
378
Brett Russ31961942005-09-30 01:36:00 -0400379 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400380};
381
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500382struct mv_port_signal {
383 u32 amps;
384 u32 pre;
385};
386
Jeff Garzik47c2b672005-11-12 21:13:17 -0500387struct mv_host_priv;
388struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500389 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
390 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500391 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
392 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
393 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500394 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
395 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500396 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
397 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500398};
399
Brett Russ20f733e2005-09-01 18:26:17 -0400400struct mv_host_priv {
Brett Russ31961942005-09-30 01:36:00 -0400401 u32 hp_flags;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500402 struct mv_port_signal signal[8];
Jeff Garzik47c2b672005-11-12 21:13:17 -0500403 const struct mv_hw_ops *ops;
Brett Russ20f733e2005-09-01 18:26:17 -0400404};
405
406static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900407static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
408static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
409static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
410static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400411static int mv_port_start(struct ata_port *ap);
412static void mv_port_stop(struct ata_port *ap);
413static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500414static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900415static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400416static void mv_error_handler(struct ata_port *ap);
417static void mv_post_int_cmd(struct ata_queued_cmd *qc);
418static void mv_eh_freeze(struct ata_port *ap);
419static void mv_eh_thaw(struct ata_port *ap);
Brett Russ20f733e2005-09-01 18:26:17 -0400420static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
421
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500422static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
423 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500424static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
425static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
426 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500427static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
428 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500429static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
430static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500431
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500432static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
433 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500434static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
435static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
436 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500437static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
438 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500439static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
440static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500441static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
442 unsigned int port_no);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500443
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400444static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400445 .module = THIS_MODULE,
446 .name = DRV_NAME,
447 .ioctl = ata_scsi_ioctl,
448 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400449 .can_queue = ATA_DEF_QUEUE,
450 .this_id = ATA_SHT_THIS_ID,
451 .sg_tablesize = MV_MAX_SG_CT,
452 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
453 .emulated = ATA_SHT_EMULATED,
454 .use_clustering = 1,
455 .proc_name = DRV_NAME,
456 .dma_boundary = MV_DMA_BOUNDARY,
457 .slave_configure = ata_scsi_slave_config,
458 .slave_destroy = ata_scsi_slave_destroy,
459 .bios_param = ata_std_bios_param,
460};
461
462static struct scsi_host_template mv6_sht = {
463 .module = THIS_MODULE,
464 .name = DRV_NAME,
465 .ioctl = ata_scsi_ioctl,
466 .queuecommand = ata_scsi_queuecmd,
467 .can_queue = ATA_DEF_QUEUE,
Brett Russ20f733e2005-09-01 18:26:17 -0400468 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500469 .sg_tablesize = MV_MAX_SG_CT,
Brett Russ20f733e2005-09-01 18:26:17 -0400470 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
471 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500472 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400473 .proc_name = DRV_NAME,
474 .dma_boundary = MV_DMA_BOUNDARY,
475 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900476 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400477 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400478};
479
Jeff Garzikc9d39132005-11-13 17:47:51 -0500480static const struct ata_port_operations mv5_ops = {
481 .port_disable = ata_port_disable,
482
483 .tf_load = ata_tf_load,
484 .tf_read = ata_tf_read,
485 .check_status = ata_check_status,
486 .exec_command = ata_exec_command,
487 .dev_select = ata_std_dev_select,
488
Jeff Garzikcffacd82007-03-09 09:46:47 -0500489 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500490
491 .qc_prep = mv_qc_prep,
492 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900493 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500494
Jeff Garzikc9d39132005-11-13 17:47:51 -0500495 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900496 .irq_on = ata_irq_on,
497 .irq_ack = ata_irq_ack,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500498
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400499 .error_handler = mv_error_handler,
500 .post_internal_cmd = mv_post_int_cmd,
501 .freeze = mv_eh_freeze,
502 .thaw = mv_eh_thaw,
503
Jeff Garzikc9d39132005-11-13 17:47:51 -0500504 .scr_read = mv5_scr_read,
505 .scr_write = mv5_scr_write,
506
507 .port_start = mv_port_start,
508 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500509};
510
511static const struct ata_port_operations mv6_ops = {
Brett Russ20f733e2005-09-01 18:26:17 -0400512 .port_disable = ata_port_disable,
513
514 .tf_load = ata_tf_load,
515 .tf_read = ata_tf_read,
516 .check_status = ata_check_status,
517 .exec_command = ata_exec_command,
518 .dev_select = ata_std_dev_select,
519
Jeff Garzikcffacd82007-03-09 09:46:47 -0500520 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400521
Brett Russ31961942005-09-30 01:36:00 -0400522 .qc_prep = mv_qc_prep,
523 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900524 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400525
Brett Russ20f733e2005-09-01 18:26:17 -0400526 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900527 .irq_on = ata_irq_on,
528 .irq_ack = ata_irq_ack,
Brett Russ20f733e2005-09-01 18:26:17 -0400529
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400530 .error_handler = mv_error_handler,
531 .post_internal_cmd = mv_post_int_cmd,
532 .freeze = mv_eh_freeze,
533 .thaw = mv_eh_thaw,
534
Brett Russ20f733e2005-09-01 18:26:17 -0400535 .scr_read = mv_scr_read,
536 .scr_write = mv_scr_write,
537
Brett Russ31961942005-09-30 01:36:00 -0400538 .port_start = mv_port_start,
539 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400540};
541
Jeff Garzike4e7b892006-01-31 12:18:41 -0500542static const struct ata_port_operations mv_iie_ops = {
543 .port_disable = ata_port_disable,
544
545 .tf_load = ata_tf_load,
546 .tf_read = ata_tf_read,
547 .check_status = ata_check_status,
548 .exec_command = ata_exec_command,
549 .dev_select = ata_std_dev_select,
550
Jeff Garzikcffacd82007-03-09 09:46:47 -0500551 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500552
553 .qc_prep = mv_qc_prep_iie,
554 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900555 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500556
Jeff Garzike4e7b892006-01-31 12:18:41 -0500557 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900558 .irq_on = ata_irq_on,
559 .irq_ack = ata_irq_ack,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500560
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400561 .error_handler = mv_error_handler,
562 .post_internal_cmd = mv_post_int_cmd,
563 .freeze = mv_eh_freeze,
564 .thaw = mv_eh_thaw,
565
Jeff Garzike4e7b892006-01-31 12:18:41 -0500566 .scr_read = mv_scr_read,
567 .scr_write = mv_scr_write,
568
569 .port_start = mv_port_start,
570 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500571};
572
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100573static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400574 { /* chip_504x */
Jeff Garzikcca39742006-08-24 03:19:22 -0400575 .flags = MV_COMMON_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400576 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400577 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500578 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400579 },
580 { /* chip_508x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400581 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400582 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400583 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500584 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400585 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500586 { /* chip_5080 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400587 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500588 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400589 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500590 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500591 },
Brett Russ20f733e2005-09-01 18:26:17 -0400592 { /* chip_604x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400593 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400594 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400595 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500596 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400597 },
598 { /* chip_608x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400599 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
600 MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400601 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400602 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500603 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400604 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500605 { /* chip_6042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400606 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500607 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400608 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500609 .port_ops = &mv_iie_ops,
610 },
611 { /* chip_7042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400612 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500613 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400614 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500615 .port_ops = &mv_iie_ops,
616 },
Brett Russ20f733e2005-09-01 18:26:17 -0400617};
618
Jeff Garzik3b7d6972005-11-10 11:04:11 -0500619static const struct pci_device_id mv_pci_tbl[] = {
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400620 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
621 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
622 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
623 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
Alan Coxcfbf7232007-07-09 14:38:41 +0100624 /* RocketRAID 1740/174x have different identifiers */
625 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
626 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
Brett Russ20f733e2005-09-01 18:26:17 -0400627
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400628 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
629 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
630 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
631 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
632 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
Jeff Garzik29179532005-11-11 08:08:03 -0500633
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400634 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
635
Florian Attenbergerd9f9c6b2007-07-02 17:09:29 +0200636 /* Adaptec 1430SA */
637 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
638
Olof Johanssone93f09d2007-01-18 18:39:59 -0600639 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
640
Morrison, Tom6a3d5862007-03-06 02:38:10 -0800641 /* add Marvell 7042 support */
642 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
643
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400644 { } /* terminate list */
Brett Russ20f733e2005-09-01 18:26:17 -0400645};
646
647static struct pci_driver mv_pci_driver = {
648 .name = DRV_NAME,
649 .id_table = mv_pci_tbl,
650 .probe = mv_init_one,
651 .remove = ata_pci_remove_one,
652};
653
Jeff Garzik47c2b672005-11-12 21:13:17 -0500654static const struct mv_hw_ops mv5xxx_ops = {
655 .phy_errata = mv5_phy_errata,
656 .enable_leds = mv5_enable_leds,
657 .read_preamp = mv5_read_preamp,
658 .reset_hc = mv5_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500659 .reset_flash = mv5_reset_flash,
660 .reset_bus = mv5_reset_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500661};
662
/* Low-level hardware ops for the 60xx-and-later chips; note the shared
 * mv_reset_pci_bus for the bus-reset hook.
 */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
671
/*
 * module options
 */
static int msi;	/* Use PCI MSI; zero (the default) disables, non-zero enables */
676
677
Jeff Garzikd88184f2007-02-26 01:26:06 -0500678/* move to PCI layer or libata core? */
679static int pci_go_64(struct pci_dev *pdev)
680{
681 int rc;
682
683 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
684 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
685 if (rc) {
686 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
687 if (rc) {
688 dev_printk(KERN_ERR, &pdev->dev,
689 "64-bit DMA enable failed\n");
690 return rc;
691 }
692 }
693 } else {
694 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
695 if (rc) {
696 dev_printk(KERN_ERR, &pdev->dev,
697 "32-bit DMA enable failed\n");
698 return rc;
699 }
700 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
701 if (rc) {
702 dev_printk(KERN_ERR, &pdev->dev,
703 "32-bit consistent DMA enable failed\n");
704 return rc;
705 }
706 }
707
708 return rc;
709}
710
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500711/*
Brett Russ20f733e2005-09-01 18:26:17 -0400712 * Functions
713 */
714
/*
 * Write a register, then read it back to force the posted PCI write
 * out to the device before the caller proceeds.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
720
Brett Russ20f733e2005-09-01 18:26:17 -0400721static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
722{
723 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
724}
725
Jeff Garzikc9d39132005-11-13 17:47:51 -0500726static inline unsigned int mv_hc_from_port(unsigned int port)
727{
728 return port >> MV_PORT_HC_SHIFT;
729}
730
731static inline unsigned int mv_hardport_from_port(unsigned int port)
732{
733 return port & MV_PORT_MASK;
734}
735
736static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
737 unsigned int port)
738{
739 return mv_hc_base(base, mv_hc_from_port(port));
740}
741
Brett Russ20f733e2005-09-01 18:26:17 -0400742static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
743{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500744 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500745 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500746 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400747}
748
749static inline void __iomem *mv_ap_base(struct ata_port *ap)
750{
Tejun Heo0d5ff562007-02-01 15:06:36 +0900751 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400752}
753
Jeff Garzikcca39742006-08-24 03:19:22 -0400754static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400755{
Jeff Garzikcca39742006-08-24 03:19:22 -0400756 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400757}
758
/* libata irq_clear hook: intentionally a no-op for this driver. */
static void mv_irq_clear(struct ata_port *ap)
{
}
762
/*
 * mv_set_edma_ptrs - Program the EDMA request and response queue base
 * addresses and producer/consumer pointers from the cached copies in @pp.
 * The XX42A0 errata path writes the full low DWORD of the queue base
 * into the "other side" pointer register instead of just the index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* request queue base must be 1KB aligned */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response queue base must be 256-byte aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
802
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @hpriv: host private data (for the errata flags used by
 *              mv_set_edma_ptrs)
 *      @pp: port private data
 *
 *      If eDMA is not already enabled per our cached state: clear any
 *      pending eDMA error-interrupt causes, reprogram the queue
 *      pointers, then set the enable bit and update the cache.
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
828
/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Request eDMA disable if our cached state says it is running,
 *      then poll until the hardware's enable bit actually clears
 *      (up to 1000 * 100us = ~100ms).  Verify the local cache of the
 *      eDMA state is accurate with a WARN_ON.
 *
 *      Returns 0 on success, -EIO if the engine refuses to stop.
 *
 *      LOCKING:
 *      Inherited from caller; mv_stop_dma() is the wrapper that takes
 *      the host lock around this.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* cached state says "off" -- the hardware had better agree */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
871
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400872static int mv_stop_dma(struct ata_port *ap)
873{
874 unsigned long flags;
875 int rc;
876
877 spin_lock_irqsave(&ap->host->lock, flags);
878 rc = __mv_stop_dma(ap);
879 spin_unlock_irqrestore(&ap->host->lock, flags);
880
881 return rc;
882}
883
#ifdef ATA_DEBUG
/* Hex-dump @bytes of MMIO space at @start, four 32-bit words per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%p: ", start + off);
		for (col = 0; off < bytes && col < 4; col++) {
			printk("%08x ", readl(start + off));
			off += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
898
/* Hex-dump the first @bytes of @pdev's PCI config space (debug builds only). */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%02x: ", off);
		for (col = 0; off < bytes && col < 4; col++) {
			u32 dw;

			(void) pci_read_config_dword(pdev, off, &dw);
			printk("%08x ", dw);
			off += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
915static void mv_dump_all_regs(void __iomem *mmio_base, int port,
916 struct pci_dev *pdev)
917{
918#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -0500919 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -0400920 port >> MV_PORT_HC_SHIFT);
921 void __iomem *port_base;
922 int start_port, num_ports, p, start_hc, num_hcs, hc;
923
924 if (0 > port) {
925 start_hc = start_port = 0;
926 num_ports = 8; /* shld be benign for 4 port devs */
927 num_hcs = 2;
928 } else {
929 start_hc = port >> MV_PORT_HC_SHIFT;
930 start_port = port;
931 num_ports = num_hcs = 1;
932 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500933 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -0400934 num_ports > 1 ? num_ports - 1 : start_port);
935
936 if (NULL != pdev) {
937 DPRINTK("PCI config space regs:\n");
938 mv_dump_pci_cfg(pdev, 0x68);
939 }
940 DPRINTK("PCI regs:\n");
941 mv_dump_mem(mmio_base+0xc00, 0x3c);
942 mv_dump_mem(mmio_base+0xd00, 0x34);
943 mv_dump_mem(mmio_base+0xf00, 0x4);
944 mv_dump_mem(mmio_base+0x1d00, 0x6c);
945 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -0700946 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -0400947 DPRINTK("HC regs (HC %i):\n", hc);
948 mv_dump_mem(hc_base, 0x1c);
949 }
950 for (p = start_port; p < start_port + num_ports; p++) {
951 port_base = mv_port_base(mmio_base, p);
952 DPRINTK("EDMA regs (port %i):\n",p);
953 mv_dump_mem(port_base, 0x54);
954 DPRINTK("SATA regs (port %i):\n",p);
955 mv_dump_mem(port_base+0x300, 0x60);
956 }
957#endif
958}
959
Brett Russ20f733e2005-09-01 18:26:17 -0400960static unsigned int mv_scr_offset(unsigned int sc_reg_in)
961{
962 unsigned int ofs;
963
964 switch (sc_reg_in) {
965 case SCR_STATUS:
966 case SCR_CONTROL:
967 case SCR_ERROR:
968 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
969 break;
970 case SCR_ACTIVE:
971 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
972 break;
973 default:
974 ofs = 0xffffffffU;
975 break;
976 }
977 return ofs;
978}
979
Tejun Heoda3dbb12007-07-16 14:29:40 +0900980static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -0400981{
982 unsigned int ofs = mv_scr_offset(sc_reg_in);
983
Tejun Heoda3dbb12007-07-16 14:29:40 +0900984 if (ofs != 0xffffffffU) {
985 *val = readl(mv_ap_base(ap) + ofs);
986 return 0;
987 } else
988 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400989}
990
Tejun Heoda3dbb12007-07-16 14:29:40 +0900991static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -0400992{
993 unsigned int ofs = mv_scr_offset(sc_reg_in);
994
Tejun Heoda3dbb12007-07-16 14:29:40 +0900995 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -0400996 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900997 return 0;
998 } else
999 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001000}
1001
/*
 * mv_edma_cfg - Set up the port's EDMA configuration register for
 * non-NCQ operation, with per-generation (Gen I / II / IIE) bit
 * settings.  Raw (1 << n) bits are chip-specific fields not named by
 * the driver's register defines; see the inline notes.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1033
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.  Allocations use devm/dmam managed variants, so
 *      no explicit free is needed on teardown or error.
 *
 *      Returns 0 on success, -ENOMEM on allocation failure, or the
 *      error from ata_pad_alloc().
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* one coherent chunk carved into CRQB, CRPB, and SG tables below */
	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	/* program the EDMA config and queue pointers under the host lock */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
1106
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.  The DMA memory itself was
 *      allocated with managed (devm/dmam) helpers in mv_port_start(),
 *      so only the eDMA engine needs explicit shutdown here.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}
1120
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.  Each DMA-mapped
 *      segment is split so that no single ePRD entry crosses a 64KB
 *      boundary of its bus address (len is capped at 0x10000 - offset).
 *
 *      Returns the number of ePRD entries written.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clip so the entry stays within its 64K window */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len);

			sg_len -= len;
			addr += len;

			/* tag the final entry of the final segment */
			if (!sg_len && ata_sg_is_last(sg, qc))
				mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

			mv_sg++;
			n_sg++;
		}

	}

	return n_sg;
}
1167
Mark Lorde1469872006-05-22 19:02:03 -04001168static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001169{
Mark Lord559eeda2006-05-19 16:40:15 -04001170 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001171 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001172 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001173}
1174
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* non-DMA protocols are issued via the normal taskfile path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining taskfile registers, command word last */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1266
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.  Gen IIE chips use a different CRQB layout
 *      (struct mv_crqb_iie) that carries the taskfile in four packed
 *      32-bit words rather than per-register 16-bit entries.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* non-DMA protocols are issued via the normal taskfile path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1335
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* advance software producer index past the slot mv_qc_prep filled */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1383
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: active command the error is attributed to, or NULL; the computed
 *      err_mask goes to @qc if present, else to the port's eh_info
 *
 * In most cases, just clear the interrupt and move on. However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
	}

	/* Gen I vs. Gen II/IIE differ in the freeze mask and in which
	 * cause bit signals EDMA self-disable; the SERR path only exists
	 * on Gen II/IIE.
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(ap, SCR_ERROR, &serr);
			sata_scr_write_flush(ap, SCR_ERROR, serr);
			/* NOTE(review): plain '=' overwrites any AC_ERR_DEV
			 * recorded above -- confirm this is intentional.
			 */
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unknown cause: still force EH to do a hard reset */
	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	/* fatal causes freeze the port; anything else just aborts */
	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1487
1488static void mv_intr_pio(struct ata_port *ap)
1489{
1490 struct ata_queued_cmd *qc;
1491 u8 ata_status;
1492
1493 /* ignore spurious intr if drive still BUSY */
1494 ata_status = readb(ap->ioaddr.status_addr);
1495 if (unlikely(ata_status & ATA_BUSY))
1496 return;
1497
1498 /* get active ATA command */
1499 qc = ata_qc_from_tag(ap, ap->active_tag);
1500 if (unlikely(!qc)) /* no active tag */
1501 return;
1502 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1503 return;
1504
1505 /* and finally, complete the ATA command */
1506 qc->err_mask |= ac_err_mask(ata_status);
1507 ata_qc_complete(qc);
1508}
1509
/* Service an EDMA completion interrupt on @ap.
 *
 * Walks the CRPB response queue from the software out-pointer up to the
 * hardware in-pointer, completing each finished command.  Any entry whose
 * low status byte carries EDMA error-cause bits is handed to mv_err_intr()
 * and the queue is NOT acked.  On normal completion the hardware
 * out-pointer register is advanced once for all consumed entries.
 *
 * LOCKING: inherited from caller.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* ack consumed entries; out_index == in_index at loop exit */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1581
Brett Russ05b308e2005-10-05 17:08:53 -04001582/**
1583 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001584 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001585 * @relevant: port error bits relevant to this host controller
1586 * @hc: which host controller we're to look at
1587 *
1588 * Read then write clear the HC interrupt status then walk each
1589 * port connected to the HC and see if it needs servicing. Port
1590 * success ints are reported in the HC interrupt status reg, the
1591 * port error ints are reported in the higher level main
1592 * interrupt status register and thus are passed in via the
1593 * 'relevant' argument.
1594 *
1595 * LOCKING:
1596 * Inherited from caller.
1597 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001598static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001599{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001600 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001601 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001602 u32 hc_irq_cause;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001603 int port, port0;
Brett Russ20f733e2005-09-01 18:26:17 -04001604
Jeff Garzik35177262007-02-24 21:26:42 -05001605 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001606 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001607 else
Brett Russ20f733e2005-09-01 18:26:17 -04001608 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001609
1610 /* we'll need the HC success int register in most cases */
1611 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001612 if (!hc_irq_cause)
1613 return;
1614
1615 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001616
1617 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1618 hc,relevant,hc_irq_cause);
1619
1620 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001621 struct ata_port *ap = host->ports[port];
Mark Lord63af2a52006-03-29 09:50:31 -05001622 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001623 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001624
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001625 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001626 continue;
1627
Brett Russ31961942005-09-30 01:36:00 -04001628 shift = port << 1; /* (port * 2) */
Brett Russ20f733e2005-09-01 18:26:17 -04001629 if (port >= MV_PORTS_PER_HC) {
1630 shift++; /* skip bit 8 in the HC Main IRQ reg */
1631 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001632 have_err_bits = ((PORT0_ERR << shift) & relevant);
1633
1634 if (unlikely(have_err_bits)) {
1635 struct ata_queued_cmd *qc;
1636
1637 qc = ata_qc_from_tag(ap, ap->active_tag);
1638 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1639 continue;
1640
1641 mv_err_intr(ap, qc);
1642 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001643 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001644
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001645 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1646
1647 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1648 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1649 mv_intr_edma(ap);
1650 } else {
1651 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1652 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001653 }
1654 }
1655 VPRINTK("EXIT\n");
1656}
1657
/* Handle a PCI-level error interrupt for the whole host: log and dump
 * registers, clear the PCI IRQ cause, then attribute AC_ERR_HOST_BUS to
 * the active command (or eh_info) of every non-offline port and freeze
 * those ports so EH hard-resets them.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	/* dump state before clearing the cause register below */
	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_port_offline(ap)) {
			ehi = &ap->eh_info;
			ata_ehi_clear_desc(ehi);
			/* only the first affected port gets the cause text */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1696
/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * Returns IRQ_HANDLED if any interrupt was serviced, IRQ_NONE otherwise.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	/* a PCI error preempts all per-HC handling */
	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each host controller whose bits are set in the main cause */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1748
Jeff Garzikc9d39132005-11-13 17:47:51 -05001749static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1750{
1751 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1752 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1753
1754 return hc_mmio + ofs;
1755}
1756
1757static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1758{
1759 unsigned int ofs;
1760
1761 switch (sc_reg_in) {
1762 case SCR_STATUS:
1763 case SCR_ERROR:
1764 case SCR_CONTROL:
1765 ofs = sc_reg_in * sizeof(u32);
1766 break;
1767 default:
1768 ofs = 0xffffffffU;
1769 break;
1770 }
1771 return ofs;
1772}
1773
Tejun Heoda3dbb12007-07-16 14:29:40 +09001774static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001775{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001776 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1777 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001778 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1779
Tejun Heoda3dbb12007-07-16 14:29:40 +09001780 if (ofs != 0xffffffffU) {
1781 *val = readl(addr + ofs);
1782 return 0;
1783 } else
1784 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001785}
1786
Tejun Heoda3dbb12007-07-16 14:29:40 +09001787static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001788{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001789 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1790 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001791 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1792
Tejun Heoda3dbb12007-07-16 14:29:40 +09001793 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001794 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001795 return 0;
1796 } else
1797 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001798}
1799
Jeff Garzik522479f2005-11-12 22:14:02 -05001800static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1801{
Jeff Garzik522479f2005-11-12 22:14:02 -05001802 int early_5080;
1803
Auke Kok44c10132007-06-08 15:46:36 -07001804 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001805
1806 if (!early_5080) {
1807 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1808 tmp |= (1 << 0);
1809 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1810 }
1811
1812 mv_reset_pci_bus(pdev, mmio);
1813}
1814
/* 50xx: program the flash-control register to its init value
 * (0x0fcfffff is a magic constant; origin undocumented).
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1819
Jeff Garzik47c2b672005-11-12 21:13:17 -05001820static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001821 void __iomem *mmio)
1822{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001823 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1824 u32 tmp;
1825
1826 tmp = readl(phy_mmio + MV5_PHY_MODE);
1827
1828 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1829 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001830}
1831
Jeff Garzik47c2b672005-11-12 21:13:17 -05001832static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001833{
Jeff Garzik522479f2005-11-12 22:14:02 -05001834 u32 tmp;
1835
1836 writel(0, mmio + MV_GPIO_PORT_CTL);
1837
1838 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1839
1840 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1841 tmp |= ~(1 << 0);
1842 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001843}
1844
/* Apply 50xx PHY errata fixes on @port, then restore the pre-emphasis
 * (bits 12:11) and amplitude (bits 7:5) values previously captured by
 * mv5_read_preamp() into the PHY mode register.
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	/* 50XXB0 parts need the LT-mode and PHY-control tweak below */
	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* clear pre/amp fields, then write back the saved values */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1870
Jeff Garzikc9d39132005-11-13 17:47:51 -05001871
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset one 50xx SATA port: disable EDMA, pulse the channel reset, then
 * zero the port's EDMA queue-pointer and IRQ registers and program the
 * config/IORDY-timeout defaults.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1898
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Quiesce one 50xx host controller: zero its registers at 0x00c-0x018
 * and rewrite the register at 0x20 -- keep bits 0x1c1c1c1c, set
 * 0x03030303 (magic values; exact meaning undocumented here).
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1917
1918static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1919 unsigned int n_hc)
1920{
1921 unsigned int hc, port;
1922
1923 for (hc = 0; hc < n_hc; hc++) {
1924 for (port = 0; port < MV_PORTS_PER_HC; port++)
1925 mv5_reset_hc_port(hpriv, mmio,
1926 (hc * MV_PORTS_PER_HC) + port);
1927
1928 mv5_reset_one_hc(hpriv, mmio, hc);
1929 }
1930
1931 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001932}
1933
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Common PCI-block reset: mask off bits 23:16 of MV_PCI_MODE, then zero
 * the PCI timers, IRQ masks/causes and error-logging registers, and set
 * the crossbar timeout to 0x000100ff.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1957
1958static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1959{
1960 u32 tmp;
1961
1962 mv5_reset_flash(hpriv, mmio);
1963
1964 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1965 tmp &= 0x3;
1966 tmp |= (1 << 5) | (1 << 6);
1967 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1968}
1969
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @hpriv: host private data (unused here)
 * @mmio: base address of the HBA
 * @n_hc: number of host controllers (unused here)
 *
 * This routine only applies to 6xxx parts.  Returns 0 on success,
 * 1 on failure (message printed via printk).
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2034
Jeff Garzik47c2b672005-11-12 21:13:17 -05002035static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002036 void __iomem *mmio)
2037{
2038 void __iomem *port_mmio;
2039 u32 tmp;
2040
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002041 tmp = readl(mmio + MV_RESET_CFG);
2042 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002043 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002044 hpriv->signal[idx].pre = 0x1 << 5;
2045 return;
2046 }
2047
2048 port_mmio = mv_port_base(mmio, idx);
2049 tmp = readl(port_mmio + PHY_MODE2);
2050
2051 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2052 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2053}
2054
/* 60xx LED setup: program GPIO port control with 0x60 (bits 5 and 6 --
 * the same bits mv6_reset_flash() sets).
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2059
/* Apply 60xx PHY errata to @port: pulse PHY_MODE2 bits on B2/C0 parts,
 * program a fixed PHY_MODE3 value, adjust PHY_MODE4 (preserving register
 * 0x310 across that write on B2 parts), then restore the pre-emphasis
 * and amplitude values saved by mv6_read_preamp() into PHY_MODE2.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	/* toggle bit 31 on (with bit 16 cleared) and back off again */
	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* B2 parts: reg 0x310 is clobbered by the PHY_MODE4
		 * write, so save it here and restore it below
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2125
/* Pulse ATA_RST on @port_no's EDMA command register, then run the
 * per-generation PHY errata hook.  On Gen II parts the SATA interface
 * control register is additionally forced to the chip-spec value with
 * the gen2i speed bit set before the reset propagates.
 *
 * LOCKING: inherited from caller (uses udelay/mdelay, does not sleep).
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2152
Jeff Garzikc9d39132005-11-13 17:47:51 -05002153/**
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002154 * mv_phy_reset - Perform eDMA reset followed by COMRESET
Jeff Garzikc9d39132005-11-13 17:47:51 -05002155 * @ap: ATA channel to manipulate
2156 *
2157 * Part of this is taken from __sata_phy_reset and modified to
2158 * not sleep since this routine gets called from interrupt level.
2159 *
2160 * LOCKING:
2161 * Inherited from caller. This is coded to safe to call at
2162 * interrupt level, i.e. it does not sleep.
2163 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002164static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2165 unsigned long deadline)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002166{
2167 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikcca39742006-08-24 03:19:22 -04002168 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002169 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzik22374672005-11-17 10:59:48 -05002170 int retry = 5;
2171 u32 sstatus;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002172
2173 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002174
Tejun Heoda3dbb12007-07-16 14:29:40 +09002175#ifdef DEBUG
2176 {
2177 u32 sstatus, serror, scontrol;
2178
2179 mv_scr_read(ap, SCR_STATUS, &sstatus);
2180 mv_scr_read(ap, SCR_ERROR, &serror);
2181 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2182 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2183 "SCtrl 0x%08x\n", status, serror, scontrol);
2184 }
2185#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002186
Jeff Garzik22374672005-11-17 10:59:48 -05002187 /* Issue COMRESET via SControl */
2188comreset_retry:
Tejun Heo81952c52006-05-15 20:57:47 +09002189 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002190 msleep(1);
Jeff Garzik22374672005-11-17 10:59:48 -05002191
Tejun Heo81952c52006-05-15 20:57:47 +09002192 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002193 msleep(20);
Jeff Garzik22374672005-11-17 10:59:48 -05002194
Brett Russ31961942005-09-30 01:36:00 -04002195 do {
Tejun Heo81952c52006-05-15 20:57:47 +09002196 sata_scr_read(ap, SCR_STATUS, &sstatus);
Andres Salomon62f1d0e2006-09-11 08:51:05 -04002197 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
Brett Russ31961942005-09-30 01:36:00 -04002198 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002199
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002200 msleep(1);
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002201 } while (time_before(jiffies, deadline));
Brett Russ20f733e2005-09-01 18:26:17 -04002202
Jeff Garzik22374672005-11-17 10:59:48 -05002203 /* work around errata */
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002204 if (IS_GEN_II(hpriv) &&
Jeff Garzik22374672005-11-17 10:59:48 -05002205 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2206 (retry-- > 0))
2207 goto comreset_retry;
Jeff Garzik095fec82005-11-12 09:50:49 -05002208
Tejun Heoda3dbb12007-07-16 14:29:40 +09002209#ifdef DEBUG
2210 {
2211 u32 sstatus, serror, scontrol;
2212
2213 mv_scr_read(ap, SCR_STATUS, &sstatus);
2214 mv_scr_read(ap, SCR_ERROR, &serror);
2215 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2216 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2217 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2218 }
2219#endif
Brett Russ31961942005-09-30 01:36:00 -04002220
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002221 if (ata_port_offline(ap)) {
2222 *class = ATA_DEV_NONE;
Brett Russ20f733e2005-09-01 18:26:17 -04002223 return;
2224 }
2225
Jeff Garzik22374672005-11-17 10:59:48 -05002226 /* even after SStatus reflects that device is ready,
2227 * it seems to take a while for link to be fully
2228 * established (and thus Status no longer 0x80/0x7F),
2229 * so we poll a bit for that, here.
2230 */
2231 retry = 20;
2232 while (1) {
2233 u8 drv_stat = ata_check_status(ap);
2234 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2235 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002236 msleep(500);
Jeff Garzik22374672005-11-17 10:59:48 -05002237 if (retry-- <= 0)
2238 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002239 if (time_after(jiffies, deadline))
2240 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002241 }
2242
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002243 /* FIXME: if we passed the deadline, the following
2244 * code probably produces an invalid result
2245 */
Brett Russ20f733e2005-09-01 18:26:17 -04002246
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002247 /* finally, read device signature from TF registers */
2248 *class = ata_dev_try_classify(ap, 0, NULL);
Jeff Garzik095fec82005-11-12 09:50:49 -05002249
2250 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2251
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002252 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
Jeff Garzik095fec82005-11-12 09:50:49 -05002253
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002254 VPRINTK("EXIT\n");
Brett Russ20f733e2005-09-01 18:26:17 -04002255}
2256
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002257static int mv_prereset(struct ata_port *ap, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002258{
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002259 struct mv_port_priv *pp = ap->private_data;
2260 struct ata_eh_context *ehc = &ap->eh_context;
2261 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002262
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002263 rc = mv_stop_dma(ap);
2264 if (rc)
2265 ehc->i.action |= ATA_EH_HARDRESET;
2266
2267 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2268 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2269 ehc->i.action |= ATA_EH_HARDRESET;
2270 }
2271
2272 /* if we're about to do hardreset, nothing more to do */
2273 if (ehc->i.action & ATA_EH_HARDRESET)
2274 return 0;
2275
2276 if (ata_port_online(ap))
2277 rc = ata_wait_ready(ap, deadline);
2278 else
2279 rc = -ENODEV;
2280
2281 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002282}
2283
/* mv_hardreset - libata hardreset hook.
 *
 * Quiesce EDMA, hard-reset the port's channel via mv_channel_reset(),
 * then bring the PHY back up and classify the attached device
 * (mv_phy_reset() stores the result through @class).
 *
 * Always returns 0; failures surface through *class / EH state.
 */
static int mv_hardreset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	/* stop any active EDMA before touching the channel */
	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
2298
2299static void mv_postreset(struct ata_port *ap, unsigned int *classes)
2300{
2301 u32 serr;
2302
2303 /* print link status */
2304 sata_print_link_status(ap);
2305
2306 /* clear SError */
2307 sata_scr_read(ap, SCR_ERROR, &serr);
2308 sata_scr_write_flush(ap, SCR_ERROR, serr);
2309
2310 /* bail out if no device is present */
2311 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2312 DPRINTK("EXIT, no device\n");
2313 return;
2314 }
2315
2316 /* set up device control */
2317 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2318}
2319
/* mv_error_handler - libata error handler entry point.
 *
 * Runs the standard EH state machine with driver-specific
 * prereset/hardreset/postreset hooks and the stock softreset.
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2325
/* mv_post_int_cmd - clean up after an internal (EH-issued) command.
 *
 * Make sure EDMA is stopped so the port is safe for PIO access.
 */
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
2330
2331static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002332{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002333 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002334 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2335 u32 tmp, mask;
2336 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002337
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002338 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002339
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002340 shift = ap->port_no * 2;
2341 if (hc > 0)
2342 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002343
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002344 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002345
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002346 /* disable assertion of portN err, done events */
2347 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2348 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2349}
2350
2351static void mv_eh_thaw(struct ata_port *ap)
2352{
2353 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2354 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2355 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2356 void __iomem *port_mmio = mv_ap_base(ap);
2357 u32 tmp, mask, hc_irq_cause;
2358 unsigned int shift, hc_port_no = ap->port_no;
2359
2360 /* FIXME: handle coalescing completion events properly */
2361
2362 shift = ap->port_no * 2;
2363 if (hc > 0) {
2364 shift++;
2365 hc_port_no -= 4;
Mark Lord9b358e32006-05-19 16:21:03 -04002366 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002367
2368 mask = 0x3 << shift;
2369
2370 /* clear EDMA errors on this port */
2371 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2372
2373 /* clear pending irq events */
2374 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2375 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2376 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2377 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2378
2379 /* enable assertion of portN err, done events */
2380 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2381 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
Brett Russ31961942005-09-30 01:36:00 -04002382}
2383
Brett Russ05b308e2005-10-05 17:08:53 -04002384/**
2385 * mv_port_init - Perform some early initialization on a single port.
2386 * @port: libata data structure storing shadow register addresses
2387 * @port_mmio: base address of the port
2388 *
2389 * Initialize shadow register mmio addresses, clear outstanding
2390 * interrupts on the port, and unmask interrupts for the future
2391 * start of the port.
2392 *
2393 * LOCKING:
2394 * Inherited from caller.
2395 */
Brett Russ31961942005-09-30 01:36:00 -04002396static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2397{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002398 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04002399 unsigned serr_ofs;
2400
Jeff Garzik8b260242005-11-12 12:32:50 -05002401 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04002402 */
2403 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05002404 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04002405 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2406 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2407 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2408 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2409 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2410 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05002411 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04002412 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2413 /* special case: control/altstatus doesn't have ATA_REG_ address */
2414 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2415
2416 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08002417 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04002418
Brett Russ31961942005-09-30 01:36:00 -04002419 /* Clear any currently outstanding port interrupt conditions */
2420 serr_ofs = mv_scr_offset(SCR_ERROR);
2421 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2422 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2423
Brett Russ20f733e2005-09-01 18:26:17 -04002424 /* unmask all EDMA error interrupts */
Brett Russ31961942005-09-30 01:36:00 -04002425 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002426
Jeff Garzik8b260242005-11-12 12:32:50 -05002427 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04002428 readl(port_mmio + EDMA_CFG_OFS),
2429 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2430 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e2005-09-01 18:26:17 -04002431}
2432
/* mv_chip_id - select per-chip-family operations and errata flags.
 *
 * Based on the board index from the PCI ID table and the PCI revision
 * byte, pick the hardware-ops vtable (5xxx vs 6xxx family) and record
 * the chip generation plus the errata workarounds that apply.
 *
 * Returns 0 on success, 1 if @board_idx is not a known board.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch(board_idx) {
	/* 5080: Gen-I chip, revision selects the 50XX errata set */
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			/* unknown stepping: assume the newest known errata */
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	/* 504x/508x: also Gen-I, same ops as 5080 */
	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	/* 604x/608x: Gen-II chips use the 6xxx ops vtable */
	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	/* 7042/6042: Gen-IIE, shares the 6xxx ops vtable */
	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		/* the PCI ID table handed us an index we don't know */
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
2528
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * Returns 0 on success, nonzero on chip-identification or
 * host-controller reset failure.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask: silence the chip during bring-up */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	/* pick ops vtable and errata flags for this chip family */
	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* sample per-port PHY state before resetting anything */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	/* per-port PHY setup and errata workarounds */
	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	/* set up shadow register addresses, ack/unmask port IRQs */
	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	/* Gen-I chips use a different main IRQ mask layout */
	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
2618
Brett Russ05b308e2005-10-05 17:08:53 -04002619/**
2620 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002621 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002622 *
2623 * FIXME: complete this.
2624 *
2625 * LOCKING:
2626 * Inherited from caller.
2627 */
Tejun Heo4447d352007-04-17 23:44:08 +09002628static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002629{
Tejun Heo4447d352007-04-17 23:44:08 +09002630 struct pci_dev *pdev = to_pci_dev(host->dev);
2631 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002632 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002633 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002634
2635 /* Use this to determine the HW stepping of the chip so we know
2636 * what errata to workaround
2637 */
Brett Russ31961942005-09-30 01:36:00 -04002638 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2639 if (scc == 0)
2640 scc_s = "SCSI";
2641 else if (scc == 0x01)
2642 scc_s = "RAID";
2643 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002644 scc_s = "?";
2645
2646 if (IS_GEN_I(hpriv))
2647 gen = "I";
2648 else if (IS_GEN_II(hpriv))
2649 gen = "II";
2650 else if (IS_GEN_IIE(hpriv))
2651 gen = "IIE";
2652 else
2653 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002654
Jeff Garzika9524a72005-10-30 14:39:11 -05002655 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002656 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2657 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002658 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2659}
2660
/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Allocates the ATA host and driver-private data, maps the chip's
 * registers via managed (devres) PCI helpers, initializes the
 * adapter, and activates the host.  All resources are devres-managed,
 * so error paths simply return and the core releases everything.
 *
 * Returns 0 on success, -errno on failure.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;	/* print banner only once */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);	/* keep device enabled if BAR busy */
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	/* try to enable 64-bit DMA addressing */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: fall back to INTx if MSI setup fails */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
2723
/* Module entry point: register the driver with the PCI core. */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
2728
/* Module exit point: unregister the driver from the PCI core. */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2733
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* msi: module parameter selecting MSI vs legacy INTx interrupts,
 * consumed in mv_init_one(); read-only via sysfs (mode 0444)
 */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);