/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Errata workaround for NCQ device errors.
 *
 * --> More errata workarounds for PCI-X.
 *
 * --> Complete a full errata audit for all chipsets to identify others.
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> [Experiment, low priority] Investigate interrupt coalescing.
 *       Quite often, especially with PCI Message Signalled Interrupts (MSI),
 *       the overhead reduced by interrupt mitigation is not worth the
 *       latency cost.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.26"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL_OFS	= 0x1046c,
	MV_GPIO_PORT_CTL_OFS	= 0x104f0,
	MV_RESET_CFG_OFS	= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

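	/*
	 * Worked sizes (illustrative, derived from the constants above):
	 * each CRQB is 32 bytes, so MV_CRQB_Q_SZ = 32 * 32 = 1024 bytes,
	 * matching the 1KB alignment noted above; each CRPB is 8 bytes,
	 * so MV_CRPB_Q_SZ = 8 * 32 = 256 bytes; and one table of 256
	 * 16-byte ePRDs is MV_SG_TBL_SZ = 4096 bytes.
	 */
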
	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | MV_FLAG_IRQ_COALESCE |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,

	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,
	PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE_OFS		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT_OFS	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK_OFS  = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	SOC_HC_MAIN_IRQ_MASK_OFS  = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by port # */
	DONE_IRQ		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	SATA_FIS_IRQ_AN		= (1 << 9),	/* async notification */

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_TESTCTL_OFS	= 0x348,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FISCFG_OFS		= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE_OFS		= 0x30,
	MV5_PHY_CTL_OFS		= 0x0C,
	SATA_INTERFACE_CFG_OFS	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS_OFS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT_OFS	= 0x34,
	EDMA_ARB_CFG_OFS	= 0x38,

	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */

	BMDMA_CMD_OFS		= 0x224,	/* bmdma command register */
	BMDMA_STATUS_OFS	= 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW_OFS	= 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH_OFS	= 0x230,	/* bmdma PRD addr 63:32 */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
				    struct mv_port_priv *pp);

static unsigned long mv_mode_filter(struct ata_device *dev,
				    unsigned long xfer_mask);
static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8   mv_bmdma_status(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
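/*
 * Illustrative worst case (added commentary, not from the original
 * comment above): a single 64KB scatterlist segment that starts at a
 * non-zero offset spans a 64K boundary, forcing mv_fill_sg() to emit
 * two ePRDs for it.  Hence MV_MAX_SG_CT / 2 = 128 sg entries may
 * expand to as many as MV_MAX_SG_CT = 256 ePRDs, which is exactly
 * what one MV_SG_TBL_SZ (4KB) table holds.
 */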
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,
	.mode_filter		= mv_mode_filter,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_GEN_I_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_GEN_II_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
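
/*
 * Usage note (illustrative): writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS)
 * not only issues the write but forces it out to the chip before the
 * caller proceeds; a plain writel() could leave the value sitting in a
 * PCI posting buffer for an unbounded time.
 */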

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
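
/*
 * Worked example (illustrative): for port == 5, mv_hc_from_port(5) = 1,
 * so shift starts at 1 * HC_SHIFT = 9; hardport = 5 & 3 = 1, giving a
 * final shift of 9 + 1 * 2 = 11.  ERR_IRQ << shift and DONE_IRQ << shift
 * then select bits 11 and 12 of the main cause/mask registers, inside
 * the bits 9-17 window reserved for HC1's ports.
 */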

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
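
/*
 * Note (added commentary): the IN/OUT pointer registers double as the
 * queue base.  Bits 31:10 of EDMA_REQ_Q_IN_PTR hold BASE_LO (hence the
 * 0x3ff alignment WARN_ON above) while bits 9:5 hold the 32-entry
 * index, so a single writelfl() of
 * (crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | (req_idx << 5) programs both
 * fields at once.  The response queue packs its index into bits 7:3 of
 * EDMA_RSP_Q_OUT_PTR the same way.
 */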

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		writelfl(new_mask, hpriv->main_irq_mask_addr);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				     unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

	mv_enable_port_irqs(ap, port_irqs);
}

/**
 *      mv_start_edma - Enable eDMA engine
 *      @ap: ATA channel to manipulate
 *      @port_mmio: port base address
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command being issued
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}
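
/*
 * Timing sketch (derived from the constants above): per_loop = 5 usecs
 * and timeout = 15 * 1000 / 5 = 3000 iterations, so the busy-wait polls
 * EDMA_STATUS_OFS every 5 usecs and gives up after roughly 15 msec.
 */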

/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
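
/*
 * Added note: the loop above polls EDMA_CMD_OFS every 10 usecs for up
 * to 10000 iterations (~100 msec total); if EDMA_EN is still set after
 * that, the engine is considered stuck and -EIO is returned.
 */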

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	int err = 0;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}
	mv_edma_cfg(ap, 0, 0);
	return err;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
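
/*
 * Worked example (assuming libata's standard SCR numbering, where
 * SCR_STATUS = 0, SCR_ERROR = 1, SCR_CONTROL = 2, SCR_ACTIVE = 3):
 * SStatus maps to port offset 0x300, SError to 0x304, and SControl to
 * 0x308, while SActive lives apart at 0x350 (SATA_ACTIVE_OFS).
 */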

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 * (no FIS-based switching).
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		}
	}
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;
	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * If the port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands, but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
		return 0;

	return ATA_DEFER_PORT;
}
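
/*
 * A sketch of the decisions above: with one non-NCQ command in flight
 * (nr_active_links == 1, EDMA/NCQ not both enabled), any new qc gets
 * ATA_DEFER_PORT; once EDMA and NCQ are both enabled, additional NCQ
 * commands are admitted and only non-NCQ commands are deferred.
 */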

static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
{
	u32 new_fiscfg, old_fiscfg;
	u32 new_ltmode, old_ltmode;
	u32 new_haltcond, old_haltcond;

	old_fiscfg   = readl(port_mmio + FISCFG_OFS);
	old_ltmode   = readl(port_mmio + LTMODE_OFS);
	old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);

	new_fiscfg = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	new_ltmode = old_ltmode & ~LTMODE_BIT8;
	new_haltcond = old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
		new_ltmode = old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			new_haltcond &= ~EDMA_ERR_DEV;
		else
			new_fiscfg |= FISCFG_WAIT_DEV_ERR;
	}

	if (new_fiscfg != old_fiscfg)
		writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
	if (new_haltcond != old_haltcond)
		writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS);
}

static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
	pp->pp_flags &= ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN);

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * but first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(port_mmio, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
		}

		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		if (want_edma) {
			cfg |= (1 << 22); /* enab 4-entry host queue cache */
			if (!IS_SOC(hpriv))
				cfg |= (1 << 18); /* enab early completion */
		}
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
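
/*
 * Illustration derived from the logic above (no extra chip documentation
 * assumed): a Gen-IIE port with want_edma=1 and want_ncq=1 behind a port
 * multiplier ends up with cfg = EDMA_CFG_Q_DEPTH | EDMA_CFG_EDMA_FBS |
 * EDMA_CFG_NCQ plus bits 23 and 22 (bit 18 on non-SOC, bit 17 with
 * MV_HP_CUT_THROUGH), and pp_flags carries MV_PP_FLAG_FBS_EN and
 * MV_PP_FLAG_NCQ_EN.
 */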

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;
	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	mv_edma_cfg(ap, 0, 0);
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
	mv_port_free_dma_mem(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
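/*
 * Worked example of the 64KB boundary handling in the loop below
 * (hypothetical values): an SG entry with addr = 0x1234fff0 and
 * sg_len = 0x20 has offset = 0xfff0, so it is split into one ePRD of
 * len 0x10 at 0x1234fff0 and a second ePRD of len 0x10 at 0x12350000,
 * since a single ePRD must not cross a 64KB segment.
 */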
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
			mv_sg->reserved = 0;

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
	mb(); /* ensure data structure is visible to the chipset */
}

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
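
/*
 * Packing sketch, derived from the expression above: each CRQB command
 * halfword is data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS, with
 * CRQB_CMD_LAST OR'd into the final word only, e.g. the trailing
 * mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1) in mv_qc_prep().
 */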

/**
 * mv_mode_filter - Allow ATAPI DMA only on GenII chips.
 * @adev: device whose xfer modes are being configured.
 *
 * Only the GenII hardware can use DMA with ATAPI drives.
 */
static unsigned long mv_mode_filter(struct ata_device *adev,
				    unsigned long xfer_mask)
{
	if (adev->class == ATA_DEV_ATAPI) {
		struct mv_host_priv *hpriv = adev->link->ap->host->private_data;
		if (!IS_GEN_II(hpriv)) {
			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
			ata_dev_printk(adev, KERN_INFO,
				"ATAPI DMA not supported on this chipset\n");
		}
	}
	return xfer_mask;
}

/**
 * mv_sff_irq_clear - Clear hardware interrupt after DMA.
 * @ap: Port associated with this ATA transaction.
 *
 * We need this only for ATAPI bmdma transactions,
 * as otherwise we experience spurious interrupts
 * after libata-sff handles the bmdma interrupts.
 */
static void mv_sff_irq_clear(struct ata_port *ap)
{
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
}

/**
 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
 * @qc: queued command to check for chipset/DMA compatibility.
 *
 * The bmdma engines cannot handle speculative data sizes
 * (bytecount under/over flow).  So only allow DMA for
 * data transfer commands with known data sizes.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	if (scmd) {
		switch (scmd->cmnd[0]) {
		case READ_6:
		case READ_10:
		case READ_12:
		case WRITE_6:
		case WRITE_10:
		case WRITE_12:
		case GPCMD_READ_CD:
		case GPCMD_SEND_DVD_STRUCTURE:
		case GPCMD_SEND_CUE_SHEET:
			return 0; /* DMA is safe */
		}
	}
	return -EOPNOTSUPP; /* use PIO instead */
}
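
/*
 * The commands whitelisted above all encode an exact transfer length in
 * the CDB.  By contrast, a command such as REQUEST SENSE may return fewer
 * bytes than the buffer allows, and that sort of bytecount surprise is
 * exactly what these bmdma engines cannot tolerate.
 */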

/**
 * mv_bmdma_setup - Set up BMDMA transaction
 * @qc: queued command to prepare DMA for.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	mv_fill_sg(qc);

	/* clear all DMA cmd bits */
	writel(0, port_mmio + BMDMA_CMD_OFS);

	/* load PRD table addr. */
	writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
		port_mmio + BMDMA_PRD_HIGH_OFS);
	writelfl(pp->sg_tbl_dma[qc->tag],
		port_mmio + BMDMA_PRD_LOW_OFS);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}
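
/*
 * The (x >> 16) >> 16 idiom above (also used by mv_fill_sg() and the
 * qc_prep routines) extracts the upper 32 bits of a dma_addr_t without
 * ever shifting by 32 in one step: a plain ">> 32" would be undefined
 * behaviour when dma_addr_t is a 32-bit type, whereas the split shift
 * safely yields zero there.
 */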

/**
 * mv_bmdma_start - Start a BMDMA transaction
 * @qc: queued command to start DMA on.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;

	/* start host DMA transaction */
	writelfl(cmd, port_mmio + BMDMA_CMD_OFS);
}

/**
 * mv_bmdma_stop - Stop BMDMA transfer
 * @qc: queued command to stop DMA on.
 *
 * Clears the ATA_DMA_START flag in the bmdma control register
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 cmd;

	/* clear start/stop bit */
	cmd = readl(port_mmio + BMDMA_CMD_OFS);
	cmd &= ~ATA_DMA_START;
	writelfl(cmd, port_mmio + BMDMA_CMD_OFS);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}

/**
 * mv_bmdma_status - Read BMDMA status
 * @ap: port for which to retrieve DMA status.
 *
 * Read and return equivalent of the sff BMDMA status register.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 mv_bmdma_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 reg, status;

	/*
	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
	 * and the ATA_DMA_INTR bit doesn't exist.
	 */
	reg = readl(port_mmio + BMDMA_STATUS_OFS);
	if (reg & ATA_DMA_ACTIVE)
		status = ATA_DMA_ACTIVE;
	else
		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
	return status;
}

/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if the command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if the command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if the command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	static int limit_warnings = 10;
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;
	unsigned int port_irqs = DONE_IRQ | ERR_IRQ;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_NCQ:
		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

		/* Write the request in pointer to kick the EDMA to life */
		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
			 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
		return 0;

	case ATA_PROT_PIO:
		/*
		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
		 *
		 * Someday, we might implement special polling workarounds
		 * for these, but it all seems rather unnecessary since we
		 * normally use only DMA for commands which transfer more
		 * than a single block of data.
		 *
		 * Much of the time, this could just work regardless.
		 * So for now, just log the incident, and allow the attempt.
		 */
		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
			--limit_warnings;
			ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
					": attempting PIO w/multiple DRQ: "
					"this may fail due to h/w errata\n");
		}
		/* fall through */
	case ATAPI_PROT_PIO:
		port_irqs = ERR_IRQ;	/* leave DONE_IRQ masked for PIO */
		/* fall through */
	default:
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}
}
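
/*
 * Ring arithmetic sketch for the EDMA path above, assuming a queue depth
 * of 32 (MV_MAX_Q_DEPTH_MASK == 0x1f): req_idx wraps 30 -> 31 -> 0 -> 1,
 * and the value written to EDMA_REQ_Q_IN_PTR_OFS combines the CRQB
 * ring's low base-address bits with req_idx shifted into the pointer
 * field, which is what hands the new CRQB to the EDMA engine.
 */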

static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			qc = NULL;
		else if (!(qc->flags & ATA_QCFLAG_ACTIVE))
			qc = NULL;
	}
	return qc;
}

static void mv_pmp_error_handler(struct ata_port *ap)
{
	unsigned int pmp, pmp_map;
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
		/*
		 * Perform NCQ error analysis on failed PMPs
		 * before we freeze the port entirely.
		 *
		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
		 */
		pmp_map = pp->delayed_eh_pmp_map;
		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
		for (pmp = 0; pmp_map != 0; pmp++) {
			unsigned int this_pmp = (1 << pmp);
			if (pmp_map & this_pmp) {
				struct ata_link *link = &ap->pmp_link[pmp];
				pmp_map &= ~this_pmp;
				ata_eh_analyze_ncq_error(link);
			}
		}
		ata_port_freeze(ap);
	}
	sata_pmp_error_handler(ap);
}

static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);

	return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
}

static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
	struct ata_eh_info *ehi;
	unsigned int pmp;

	/*
	 * Initialize EH info for PMPs which saw device errors
	 */
	ehi = &ap->link.eh_info;
	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = (1 << pmp);
		if (pmp_map & this_pmp) {
			struct ata_link *link = &ap->pmp_link[pmp];

			pmp_map &= ~this_pmp;
			ehi = &link->eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_push_desc(ehi, "dev err");
			ehi->err_mask |= AC_ERR_DEV;
			ehi->action |= ATA_EH_RESET;
			ata_link_abort(link);
		}
	}
}
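
/*
 * Bitmap walk example for the loops above: pmp_map == 0x0005 means PMP
 * links 0 and 2 saw device errors, so each of those links gets a
 * "dev err" EH descriptor, AC_ERR_DEV, ATA_EH_RESET, and an abort; the
 * loop terminates once pmp_map has been cleared bit by bit.
 */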

static int mv_req_q_empty(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 in_ptr, out_ptr;

	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
}

static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	int failed_links;
	unsigned int old_map, new_map;

	/*
	 * Device error during FBS+NCQ operation:
	 *
	 * Set a port flag to prevent further I/O being enqueued.
	 * Leave the EDMA running to drain outstanding commands from this port.
	 * Perform the post-mortem/EH only when all responses are complete.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
	 */
	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
		pp->delayed_eh_pmp_map = 0;
	}
	old_map = pp->delayed_eh_pmp_map;
	new_map = old_map | mv_get_err_pmp_map(ap);

	if (old_map != new_map) {
		pp->delayed_eh_pmp_map = new_map;
		mv_pmp_eh_prep(ap, new_map & ~old_map);
	}
	failed_links = hweight16(new_map);

	ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
			"failed_links=%d nr_active_links=%d\n",
			__func__, pp->delayed_eh_pmp_map,
			ap->qc_active, failed_links,
			ap->nr_active_links);

	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
		mv_process_crpb_entries(ap, pp);
		mv_stop_edma(ap);
		mv_eh_freeze(ap);
		ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
		return 1;	/* handled */
	}
	ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
	return 1;	/* handled */
}

static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
	/*
	 * Possible future enhancement:
	 *
	 * FBS+non-NCQ operation is not yet implemented.
	 * See related notes in mv_edma_cfg().
	 *
	 * Device error during FBS+non-NCQ operation:
	 *
	 * We need to snapshot the shadow registers for each failed command.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
	 */
	return 0;	/* not handled */
}

static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;	/* EDMA was not active: not handled */
	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
		return 0;	/* FBS was not active: not handled */

	if (!(edma_err_cause & EDMA_ERR_DEV))
		return 0;	/* non DEV error: not handled */
	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
		return 0;	/* other problems: not handled */

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
		/*
		 * EDMA should NOT have self-disabled for this case.
		 * If it did, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_ncq_dev_err(ap);
	} else {
		/*
		 * EDMA should have self-disabled for this case.
		 * If it did not, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_non_ncq_dev_err(ap);
	}
}
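
/*
 * Decision summary for mv_handle_dev_err(), as coded above: the error is
 * only "handled" here when EDMA and FBS are both active and the cause
 * bits (transient IRQs masked off) reduce to EDMA_ERR_DEV, possibly with
 * EDMA_ERR_SELF_DIS.  NCQ mode expects the EDMA engine still running,
 * non-NCQ mode expects it to have self-disabled; anything else falls
 * back to the generic mv_err_intr() handling.
 */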

static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	char *when = "idle";

	ata_ehi_clear_desc(ehi);
	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
		when = "disabled";
	} else if (edma_was_enabled) {
		when = "EDMA enabled";
	} else {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
			when = "polling";
	}
	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
	ehi->err_mask |= AC_ERR_OTHER;
	ehi->action |= ATA_EH_RESET;
	ata_port_freeze(ap);
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 *
 * Most cases require a full reset of the chip's state machine,
 * which also performs a COMRESET.
 * Also, if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	u32 fis_cause = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc;
	int abort = 0;

	/*
	 * Read and clear the SError and err_cause bits.
	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
	 */
	sata_scr_read(&ap->link, SCR_ERROR, &serr);
	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
		writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
	}
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (edma_err_cause & EDMA_ERR_DEV) {
		/*
		 * Device errors during FIS-based switching operation
		 * require special handling.
		 */
		if (mv_handle_dev_err(ap, edma_err_cause))
			return;
	}

	qc = mv_get_active_qc(ap);
	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
			edma_err_cause, pp->pp_flags);

	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
		if (fis_cause & SATA_FIS_IRQ_AN) {
			u32 ec = edma_err_cause &
			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
			sata_async_notification(ap);
			if (!ec)
				return; /* Just an AN; no need for the nukes */
			ata_ehi_push_desc(ehi, "SDB notify");
		}
	}
	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV) {
		err_mask |= AC_ERR_DEV;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "dev error");
	}
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			ata_ehi_push_desc(ehi, "SError=%08x", serr);
			err_mask |= AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (err_mask == AC_ERR_DEV) {
		/*
		 * Cannot do ata_port_freeze() here,
		 * because it would kill PIO access,
		 * which is needed for further diagnosis.
		 */
		mv_eh_freeze(ap);
		abort = 1;
	} else if (edma_err_cause & eh_freeze_mask) {
		/*
		 * Note to self: ata_port_freeze() calls ata_port_abort()
		 */
		ata_port_freeze(ap);
	} else {
		abort = 1;
	}

	if (abort) {
		if (qc)
			ata_link_abort(qc->dev->link);
		else
			ata_port_abort(ap);
	}
}

static void mv_process_crpb_response(struct ata_port *ap,
		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

	if (qc) {
		u8 ata_status;
		u16 edma_status = le16_to_cpu(response->flags);
		/*
		 * edma_status from a response queue entry:
		 *   LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
		 *   MSB is saved ATA status from command completion.
		 */
		if (!ncq_enabled) {
			u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
			if (err_cause) {
				/*
				 * Error will be seen/handled by
				 * mv_err_intr().  So do nothing at all here.
				 */
				return;
			}
		}
		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
		if (!ac_err_mask(ata_status))
			ata_qc_complete(qc);
		/* else: leave it for mv_err_intr() */
	} else {
		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
				__func__, tag);
	}
}

static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process new responses since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		mv_process_crpb_response(ap, response, tag, ncq_enabled);
		work_done = true;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
	struct mv_port_priv *pp;
	int edma_was_enabled;

	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
		mv_unexpected_intr(ap, 0);
		return;
	}
	/*
	 * Grab a snapshot of the EDMA_EN flag setting,
	 * so that we have a consistent view for this port,
	 * even if one of the routines we call changes it.
	 */
	pp = ap->private_data;
	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	/*
	 * Process completed CRPB response(s) before other events.
	 */
	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
		mv_process_crpb_entries(ap, pp);
		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
			mv_handle_fbs_ncq_dev_err(ap);
	}
	/*
	 * Handle chip-reported errors, or continue on to handle PIO.
	 */
	if (unlikely(port_cause & ERR_IRQ)) {
		mv_err_intr(ap);
	} else if (!edma_was_enabled) {
		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
		if (qc)
			ata_sff_host_intr(ap, qc);
		else
			mv_unexpected_intr(ap, edma_was_enabled);
	}
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @main_irq_cause: Main interrupt cause register for the chip.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	unsigned int handled = 0, port;

	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		unsigned int p, shift, hardport, port_cause;

		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
		/*
		 * Each hc within the host has its own hc_irq_cause register,
		 * where the interrupting ports bits get ack'd.
		 */
		if (hardport == 0) {	/* first port on this hc ? */
			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
			u32 port_mask, ack_irqs;
			/*
			 * Skip this entire hc if nothing pending for any ports
			 */
			if (!hc_cause) {
				port += MV_PORTS_PER_HC - 1;
				continue;
			}
			/*
			 * We don't need/want to read the hc_irq_cause register,
			 * because doing so hurts performance, and
			 * main_irq_cause already gives us everything we need.
			 *
			 * But we do have to *write* to the hc_irq_cause to ack
			 * the ports that we are handling this time through.
			 *
			 * This requires that we create a bitmap for those
			 * ports which interrupted us, and use that bitmap
			 * to ack (only) those ports via hc_irq_cause.
			 */
			ack_irqs = 0;
			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
				if ((port + p) >= hpriv->n_ports)
					break;
				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
				if (hc_cause & port_mask)
					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
			}
			hc_mmio = mv_hc_base_from_port(mmio, port);
			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
			handled = 1;
		}
		/*
		 * Handle interrupts signalled for this port:
		 */
		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
		if (port_cause)
			mv_port_intr(ap, port_cause);
	}
	return handled;
}
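
/*
 * Decode sketch for the loop above, assuming the layout implied by the
 * code (each port owns a DONE_IRQ/ERR_IRQ bit pair within its hc's field
 * of main_irq_cause): MV_PORT_TO_SHIFT_AND_HARDPORT() maps e.g. port 5
 * to hardport 1 of the second hc, and only hardport 0 of each hc issues
 * the single hc_irq_cause ack on behalf of the whole group.
 */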

static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
	return 1;	/* handled */
}

/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read-only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
	u32 main_irq_cause, pending_irqs;

	spin_lock(&host->lock);

	/* for MSI: block new interrupts while in here */
	if (using_msi)
		writel(0, hpriv->main_irq_mask_addr);

	main_irq_cause = readl(hpriv->main_irq_cause_addr);
	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (pending_irqs && main_irq_cause != 0xffffffffU) {
		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
			handled = mv_pci_error(host, hpriv->base);
		else
			handled = mv_host_intr(host, pending_irqs);
	}

	/* for MSI: unmask; interrupt cause bits will retrigger now */
	if (using_msi)
		writel(hpriv->main_irq_mask, hpriv->main_irq_mask_addr);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
2436
Jeff Garzikc9d39132005-11-13 17:47:51 -05002437static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
2438{
2439 unsigned int ofs;
2440
2441 switch (sc_reg_in) {
2442 case SCR_STATUS:
2443 case SCR_ERROR:
2444 case SCR_CONTROL:
2445 ofs = sc_reg_in * sizeof(u32);
2446 break;
2447 default:
2448 ofs = 0xffffffffU;
2449 break;
2450 }
2451 return ofs;
2452}
2453
Tejun Heo82ef04f2008-07-31 17:02:40 +09002454static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002455{
Tejun Heo82ef04f2008-07-31 17:02:40 +09002456 struct mv_host_priv *hpriv = link->ap->host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002457 void __iomem *mmio = hpriv->base;
Tejun Heo82ef04f2008-07-31 17:02:40 +09002458 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002459 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2460
Tejun Heoda3dbb12007-07-16 14:29:40 +09002461 if (ofs != 0xffffffffU) {
2462 *val = readl(addr + ofs);
2463 return 0;
2464 } else
2465 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002466}
2467
Tejun Heo82ef04f2008-07-31 17:02:40 +09002468static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002469{
Tejun Heo82ef04f2008-07-31 17:02:40 +09002470 struct mv_host_priv *hpriv = link->ap->host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002471 void __iomem *mmio = hpriv->base;
Tejun Heo82ef04f2008-07-31 17:02:40 +09002472 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002473 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2474
Tejun Heoda3dbb12007-07-16 14:29:40 +09002475 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09002476 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09002477 return 0;
2478 } else
2479 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002480}
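/*
 * Illustrative sketch (assumed call path, not driver code): libata
 * dispatches to the scr_read/scr_write methods above through its
 * generic SCR accessors, so a caller holding a valid ata_link would
 * typically read SStatus as:
 *
 *	u32 sstatus;
 *	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
 *		;	// sstatus now holds the link's SStatus value
 */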
2481
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002482static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik522479f2005-11-12 22:14:02 -05002483{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002484 struct pci_dev *pdev = to_pci_dev(host->dev);
Jeff Garzik522479f2005-11-12 22:14:02 -05002485 int early_5080;
2486
Auke Kok44c10132007-06-08 15:46:36 -07002487 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05002488
2489 if (!early_5080) {
2490 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
2491 tmp |= (1 << 0);
2492 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
2493 }
2494
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002495 mv_reset_pci_bus(host, mmio);
Jeff Garzik522479f2005-11-12 22:14:02 -05002496}
2497
2498static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2499{
Mark Lord8e7decd2008-05-02 02:07:51 -04002500 writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
Jeff Garzik522479f2005-11-12 22:14:02 -05002501}
2502
Jeff Garzik47c2b672005-11-12 21:13:17 -05002503static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002504 void __iomem *mmio)
2505{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002506 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
2507 u32 tmp;
2508
2509 tmp = readl(phy_mmio + MV5_PHY_MODE);
2510
2511 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
2512 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002513}
2514
Jeff Garzik47c2b672005-11-12 21:13:17 -05002515static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002516{
Jeff Garzik522479f2005-11-12 22:14:02 -05002517 u32 tmp;
2518
Mark Lord8e7decd2008-05-02 02:07:51 -04002519 writel(0, mmio + MV_GPIO_PORT_CTL_OFS);
Jeff Garzik522479f2005-11-12 22:14:02 -05002520
2521 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
2522
2523 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
2524 tmp |= ~(1 << 0);
2525 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002526}
2527
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002528static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2529 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002530{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002531 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
2532 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
2533 u32 tmp;
2534 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
2535
2536 if (fix_apm_sq) {
Mark Lord8e7decd2008-05-02 02:07:51 -04002537 tmp = readl(phy_mmio + MV5_LTMODE_OFS);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002538 tmp |= (1 << 19);
Mark Lord8e7decd2008-05-02 02:07:51 -04002539 writel(tmp, phy_mmio + MV5_LTMODE_OFS);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002540
Mark Lord8e7decd2008-05-02 02:07:51 -04002541 tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002542 tmp &= ~0x3;
2543 tmp |= 0x1;
Mark Lord8e7decd2008-05-02 02:07:51 -04002544 writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002545 }
2546
2547 tmp = readl(phy_mmio + MV5_PHY_MODE);
2548 tmp &= ~mask;
2549 tmp |= hpriv->signal[port].pre;
2550 tmp |= hpriv->signal[port].amps;
2551 writel(tmp, phy_mmio + MV5_PHY_MODE);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002552}
2553
Jeff Garzikc9d39132005-11-13 17:47:51 -05002554
2555#undef ZERO
2556#define ZERO(reg) writel(0, port_mmio + (reg))
2557static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
2558 unsigned int port)
Jeff Garzik47c2b672005-11-12 21:13:17 -05002559{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002560 void __iomem *port_mmio = mv_port_base(mmio, port);
2561
Mark Lorde12bef52008-03-31 19:33:56 -04002562 mv_reset_channel(hpriv, mmio, port);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002563
2564 ZERO(0x028); /* command */
2565 writel(0x11f, port_mmio + EDMA_CFG_OFS);
2566 ZERO(0x004); /* timer */
2567 ZERO(0x008); /* irq err cause */
2568 ZERO(0x00c); /* irq err mask */
2569 ZERO(0x010); /* rq bah */
2570 ZERO(0x014); /* rq inp */
2571 ZERO(0x018); /* rq outp */
2572 ZERO(0x01c); /* respq bah */
2573 ZERO(0x024); /* respq outp */
2574 ZERO(0x020); /* respq inp */
2575 ZERO(0x02c); /* test control */
Mark Lord8e7decd2008-05-02 02:07:51 -04002576 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002577}
2578#undef ZERO
2579
2580#define ZERO(reg) writel(0, hc_mmio + (reg))
2581static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2582 unsigned int hc)
2583{
2584 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2585 u32 tmp;
2586
2587 ZERO(0x00c);
2588 ZERO(0x010);
2589 ZERO(0x014);
2590 ZERO(0x018);
2591
2592 tmp = readl(hc_mmio + 0x20);
2593 tmp &= 0x1c1c1c1c;
2594 tmp |= 0x03030303;
2595 writel(tmp, hc_mmio + 0x20);
2596}
2597#undef ZERO
2598
2599static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2600 unsigned int n_hc)
2601{
2602 unsigned int hc, port;
2603
2604 for (hc = 0; hc < n_hc; hc++) {
2605 for (port = 0; port < MV_PORTS_PER_HC; port++)
2606 mv5_reset_hc_port(hpriv, mmio,
2607 (hc * MV_PORTS_PER_HC) + port);
2608
2609 mv5_reset_one_hc(hpriv, mmio, hc);
2610 }
2611
2612 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002613}
2614
Jeff Garzik101ffae2005-11-12 22:17:49 -05002615#undef ZERO
2616#define ZERO(reg) writel(0, mmio + (reg))
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002617static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002618{
Mark Lord02a121d2007-12-01 13:07:22 -05002619 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzik101ffae2005-11-12 22:17:49 -05002620 u32 tmp;
2621
Mark Lord8e7decd2008-05-02 02:07:51 -04002622 tmp = readl(mmio + MV_PCI_MODE_OFS);
Jeff Garzik101ffae2005-11-12 22:17:49 -05002623 tmp &= 0xff00ffff;
Mark Lord8e7decd2008-05-02 02:07:51 -04002624 writel(tmp, mmio + MV_PCI_MODE_OFS);
Jeff Garzik101ffae2005-11-12 22:17:49 -05002625
2626 ZERO(MV_PCI_DISC_TIMER);
2627 ZERO(MV_PCI_MSI_TRIGGER);
Mark Lord8e7decd2008-05-02 02:07:51 -04002628 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
Jeff Garzik101ffae2005-11-12 22:17:49 -05002629 ZERO(MV_PCI_SERR_MASK);
Mark Lord02a121d2007-12-01 13:07:22 -05002630 ZERO(hpriv->irq_cause_ofs);
2631 ZERO(hpriv->irq_mask_ofs);
Jeff Garzik101ffae2005-11-12 22:17:49 -05002632 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2633 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2634 ZERO(MV_PCI_ERR_ATTRIBUTE);
2635 ZERO(MV_PCI_ERR_COMMAND);
2636}
2637#undef ZERO
2638
2639static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2640{
2641 u32 tmp;
2642
2643 mv5_reset_flash(hpriv, mmio);
2644
Mark Lord8e7decd2008-05-02 02:07:51 -04002645 tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
Jeff Garzik101ffae2005-11-12 22:17:49 -05002646 tmp &= 0x3;
2647 tmp |= (1 << 5) | (1 << 6);
Mark Lord8e7decd2008-05-02 02:07:51 -04002648 writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
Jeff Garzik101ffae2005-11-12 22:17:49 -05002649}
2650
2651/**
2652 * mv6_reset_hc - Perform the 6xxx global soft reset
2653 * @mmio: base address of the HBA
2654 *
2655 * This routine only applies to 6xxx parts.
2656 *
2657 * LOCKING:
2658 * Inherited from caller.
2659 */
Jeff Garzikc9d39132005-11-13 17:47:51 -05002660static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2661 unsigned int n_hc)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002662{
2663 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2664 int i, rc = 0;
2665 u32 t;
2666
2667 /* Follow the procedure defined in the PCI "main command and status
2668 * register" table.
2669 */
2670 t = readl(reg);
2671 writel(t | STOP_PCI_MASTER, reg);
2672
2673 for (i = 0; i < 1000; i++) {
2674 udelay(1);
2675 t = readl(reg);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04002676 if (PCI_MASTER_EMPTY & t)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002677 break;
Jeff Garzik101ffae2005-11-12 22:17:49 -05002678 }
2679 if (!(PCI_MASTER_EMPTY & t)) {
2680 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2681 rc = 1;
2682 goto done;
2683 }
2684
2685 /* set reset */
2686 i = 5;
2687 do {
2688 writel(t | GLOB_SFT_RST, reg);
2689 t = readl(reg);
2690 udelay(1);
2691 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2692
2693 if (!(GLOB_SFT_RST & t)) {
2694 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2695 rc = 1;
2696 goto done;
2697 }
2698
2699 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2700 i = 5;
2701 do {
2702 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2703 t = readl(reg);
2704 udelay(1);
2705 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2706
2707 if (GLOB_SFT_RST & t) {
2708 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2709 rc = 1;
2710 }
2711done:
2712 return rc;
2713}
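/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * mv6_reset_hc() above relies on the classic bounded poll -- re-read
 * a register with a small delay until a bit settles or the retry
 * budget runs out. The same shape, factored out:
 */
static inline int __maybe_unused mv_poll_bit_set(void __iomem *reg,
						 u32 bit, int tries)
{
	while (tries-- > 0) {
		if (readl(reg) & bit)
			return 0;	/* bit observed set */
		udelay(1);
	}
	return -ETIMEDOUT;	/* bit never settled */
}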
2714
Jeff Garzik47c2b672005-11-12 21:13:17 -05002715static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002716 void __iomem *mmio)
2717{
2718 void __iomem *port_mmio;
2719 u32 tmp;
2720
Mark Lord8e7decd2008-05-02 02:07:51 -04002721 tmp = readl(mmio + MV_RESET_CFG_OFS);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002722 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002723 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002724 hpriv->signal[idx].pre = 0x1 << 5;
2725 return;
2726 }
2727
2728 port_mmio = mv_port_base(mmio, idx);
2729 tmp = readl(port_mmio + PHY_MODE2);
2730
2731 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2732 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2733}
2734
Jeff Garzik47c2b672005-11-12 21:13:17 -05002735static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002736{
Mark Lord8e7decd2008-05-02 02:07:51 -04002737 writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002738}
2739
Jeff Garzikc9d39132005-11-13 17:47:51 -05002740static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002741 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002742{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002743 void __iomem *port_mmio = mv_port_base(mmio, port);
2744
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002745 u32 hp_flags = hpriv->hp_flags;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002746 int fix_phy_mode2 =
2747 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002748 int fix_phy_mode4 =
Jeff Garzik47c2b672005-11-12 21:13:17 -05002749 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Mark Lord8c30a8b2008-05-27 17:56:31 -04002750 u32 m2, m3;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002751
2752 if (fix_phy_mode2) {
2753 m2 = readl(port_mmio + PHY_MODE2);
2754 m2 &= ~(1 << 16);
2755 m2 |= (1 << 31);
2756 writel(m2, port_mmio + PHY_MODE2);
2757
2758 udelay(200);
2759
2760 m2 = readl(port_mmio + PHY_MODE2);
2761 m2 &= ~((1 << 16) | (1 << 31));
2762 writel(m2, port_mmio + PHY_MODE2);
2763
2764 udelay(200);
2765 }
2766
Mark Lord8c30a8b2008-05-27 17:56:31 -04002767 /*
2768 * Gen-II/IIe PHY_MODE3 errata RM#2:
2769 * Achieves better receiver noise performance than the h/w default:
2770 */
2771 m3 = readl(port_mmio + PHY_MODE3);
2772 m3 = (m3 & 0x1f) | (0x5555601 << 5);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002773
Mark Lord0388a8c2008-05-28 13:41:52 -04002774 /* Guideline 88F5182 (GL# SATA-S11) */
2775 if (IS_SOC(hpriv))
2776 m3 &= ~0x1c;
2777
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002778 if (fix_phy_mode4) {
Mark Lordba069e32008-05-31 16:46:34 -04002779 u32 m4 = readl(port_mmio + PHY_MODE4);
2780 /*
2781 * Enforce reserved-bit restrictions on GenIIe devices only.
2782 * For earlier chipsets, force only the internal config field
2783 * (workaround for errata FEr SATA#10 part 1).
2784 */
Mark Lord8c30a8b2008-05-27 17:56:31 -04002785 if (IS_GEN_IIE(hpriv))
Mark Lordba069e32008-05-31 16:46:34 -04002786 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
2787 else
2788 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
Mark Lord8c30a8b2008-05-27 17:56:31 -04002789 writel(m4, port_mmio + PHY_MODE4);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002790 }
Mark Lordb406c7a2008-05-28 12:01:12 -04002791 /*
2792 * Workaround for 60x1-B2 errata SATA#13:
2793 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
2794 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
2795 */
2796 writel(m3, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002797
2798 /* Revert values of pre-emphasis and signal amps to the saved ones */
2799 m2 = readl(port_mmio + PHY_MODE2);
2800
2801 m2 &= ~MV_M2_PREAMP_MASK;
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002802 m2 |= hpriv->signal[port].amps;
2803 m2 |= hpriv->signal[port].pre;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002804 m2 &= ~(1 << 16);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002805
Jeff Garzike4e7b892006-01-31 12:18:41 -05002806 /* according to mvSata 3.6.1, some IIE values are fixed */
2807 if (IS_GEN_IIE(hpriv)) {
2808 m2 &= ~0xC30FF01F;
2809 m2 |= 0x0000900F;
2810 }
2811
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002812 writel(m2, port_mmio + PHY_MODE2);
2813}
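/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * each PHY fixup above is a 32-bit read-modify-write. The recurring
 * pattern, factored out:
 */
static inline void __maybe_unused mv_rmw32(void __iomem *reg,
					   u32 clear, u32 set)
{
	u32 v = readl(reg);	/* fetch current register value */

	v &= ~clear;		/* drop the field being replaced */
	v |= set;		/* merge in the new bits */
	writel(v, reg);
}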
2814
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002815/* TODO: use the generic LED interface to configure the SATA Presence */
2816/* & Acitivy LEDs on the board */
2817static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2818 void __iomem *mmio)
2819{
2820 return;
2821}
2822
2823static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2824 void __iomem *mmio)
2825{
2826 void __iomem *port_mmio;
2827 u32 tmp;
2828
2829 port_mmio = mv_port_base(mmio, idx);
2830 tmp = readl(port_mmio + PHY_MODE2);
2831
2832 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2833 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2834}
2835
2836#undef ZERO
2837#define ZERO(reg) writel(0, port_mmio + (reg))
2838static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2839 void __iomem *mmio, unsigned int port)
2840{
2841 void __iomem *port_mmio = mv_port_base(mmio, port);
2842
Mark Lorde12bef52008-03-31 19:33:56 -04002843 mv_reset_channel(hpriv, mmio, port);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002844
2845 ZERO(0x028); /* command */
2846 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2847 ZERO(0x004); /* timer */
2848 ZERO(0x008); /* irq err cause */
2849 ZERO(0x00c); /* irq err mask */
2850 ZERO(0x010); /* rq bah */
2851 ZERO(0x014); /* rq inp */
2852 ZERO(0x018); /* rq outp */
2853 ZERO(0x01c); /* respq bah */
2854 ZERO(0x024); /* respq outp */
2855 ZERO(0x020); /* respq inp */
2856 ZERO(0x02c); /* test control */
Mark Lord8e7decd2008-05-02 02:07:51 -04002857 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002858}
2859
2860#undef ZERO
2861
2862#define ZERO(reg) writel(0, hc_mmio + (reg))
2863static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2864 void __iomem *mmio)
2865{
2866 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2867
2868 ZERO(0x00c);
2869 ZERO(0x010);
2870 ZERO(0x014);
2871
2872}
2873
2874#undef ZERO
2875
2876static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2877 void __iomem *mmio, unsigned int n_hc)
2878{
2879 unsigned int port;
2880
2881 for (port = 0; port < hpriv->n_ports; port++)
2882 mv_soc_reset_hc_port(hpriv, mmio, port);
2883
2884 mv_soc_reset_one_hc(hpriv, mmio);
2885
2886 return 0;
2887}
2888
2889static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2890 void __iomem *mmio)
2891{
2892 return;
2893}
2894
2895static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2896{
2897 return;
2898}
2899
Mark Lord8e7decd2008-05-02 02:07:51 -04002900static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
Mark Lordb67a1062008-03-31 19:35:13 -04002901{
Mark Lord8e7decd2008-05-02 02:07:51 -04002902 u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);
Mark Lordb67a1062008-03-31 19:35:13 -04002903
Mark Lord8e7decd2008-05-02 02:07:51 -04002904 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
Mark Lordb67a1062008-03-31 19:35:13 -04002905 if (want_gen2i)
Mark Lord8e7decd2008-05-02 02:07:51 -04002906 ifcfg |= (1 << 7); /* enable gen2i speed */
2907 writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
Mark Lordb67a1062008-03-31 19:35:13 -04002908}
2909
Mark Lorde12bef52008-03-31 19:33:56 -04002910static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -05002911 unsigned int port_no)
Brett Russ20f733e2005-09-01 18:26:17 -04002912{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002913 void __iomem *port_mmio = mv_port_base(mmio, port_no);
Brett Russ20f733e2005-09-01 18:26:17 -04002914
Mark Lord8e7decd2008-05-02 02:07:51 -04002915 /*
2916 * The datasheet warns against setting EDMA_RESET when EDMA is active
2917 * (but doesn't say what the problem might be). So we first try
2918 * to disable the EDMA engine before doing the EDMA_RESET operation.
2919 */
Mark Lord0d8be5c2008-04-16 14:56:12 -04002920 mv_stop_edma_engine(port_mmio);
Mark Lord8e7decd2008-05-02 02:07:51 -04002921 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002922
Mark Lordb67a1062008-03-31 19:35:13 -04002923 if (!IS_GEN_I(hpriv)) {
Mark Lord8e7decd2008-05-02 02:07:51 -04002924 /* Enable 3.0 Gb/s link speed: this survives EDMA_RESET */
2925 mv_setup_ifcfg(port_mmio, 1);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002926 }
Mark Lordb67a1062008-03-31 19:35:13 -04002927 /*
Mark Lord8e7decd2008-05-02 02:07:51 -04002928 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
Mark Lordb67a1062008-03-31 19:35:13 -04002929 * link, and physical layers. It resets all SATA interface registers
2930 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
Brett Russ20f733e2005-09-01 18:26:17 -04002931 */
Mark Lord8e7decd2008-05-02 02:07:51 -04002932 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
Mark Lordb67a1062008-03-31 19:35:13 -04002933 udelay(25); /* allow reset propagation */
Brett Russ31961942005-09-30 01:36:00 -04002934 writelfl(0, port_mmio + EDMA_CMD_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002935
Jeff Garzikc9d39132005-11-13 17:47:51 -05002936 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2937
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002938 if (IS_GEN_I(hpriv))
Jeff Garzikc9d39132005-11-13 17:47:51 -05002939 mdelay(1);
2940}
2941
Mark Lorde49856d2008-04-16 14:59:07 -04002942static void mv_pmp_select(struct ata_port *ap, int pmp)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002943{
Mark Lorde49856d2008-04-16 14:59:07 -04002944 if (sata_pmp_supported(ap)) {
2945 void __iomem *port_mmio = mv_ap_base(ap);
2946 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2947 int old = reg & 0xf;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002948
Mark Lorde49856d2008-04-16 14:59:07 -04002949 if (old != pmp) {
2950 reg = (reg & ~0xf) | pmp;
2951 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
2952 }
Tejun Heoda3dbb12007-07-16 14:29:40 +09002953 }
Brett Russ20f733e2005-09-01 18:26:17 -04002954}
2955
Mark Lorde49856d2008-04-16 14:59:07 -04002956static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
2957 unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002958{
Mark Lorde49856d2008-04-16 14:59:07 -04002959 mv_pmp_select(link->ap, sata_srst_pmp(link));
2960 return sata_std_hardreset(link, class, deadline);
2961}
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002962
Mark Lorde49856d2008-04-16 14:59:07 -04002963static int mv_softreset(struct ata_link *link, unsigned int *class,
2964 unsigned long deadline)
2965{
2966 mv_pmp_select(link->ap, sata_srst_pmp(link));
2967 return ata_sff_softreset(link, class, deadline);
Jeff Garzik22374672005-11-17 10:59:48 -05002968}
2969
Tejun Heocc0680a2007-08-06 18:36:23 +09002970static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002971 unsigned long deadline)
2972{
Tejun Heocc0680a2007-08-06 18:36:23 +09002973 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002974 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordb5624682008-03-31 19:34:40 -04002975 struct mv_port_priv *pp = ap->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002976 void __iomem *mmio = hpriv->base;
Mark Lord0d8be5c2008-04-16 14:56:12 -04002977 int rc, attempts = 0, extra = 0;
2978 u32 sstatus;
2979 bool online;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002980
Mark Lorde12bef52008-03-31 19:33:56 -04002981 mv_reset_channel(hpriv, mmio, ap->port_no);
Mark Lordb5624682008-03-31 19:34:40 -04002982 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002983
Mark Lord0d8be5c2008-04-16 14:56:12 -04002984 /* Workaround for errata FEr SATA#10 (part 2) */
2985 do {
Mark Lord17c5aab2008-04-16 14:56:51 -04002986 const unsigned long *timing =
2987 sata_ehc_deb_timing(&link->eh_context);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002988
Mark Lord17c5aab2008-04-16 14:56:51 -04002989 rc = sata_link_hardreset(link, timing, deadline + extra,
2990 &online, NULL);
Mark Lord9dcffd92008-05-14 09:18:12 -04002991 rc = online ? -EAGAIN : rc;
Mark Lord17c5aab2008-04-16 14:56:51 -04002992 if (rc)
Mark Lord0d8be5c2008-04-16 14:56:12 -04002993 return rc;
Mark Lord0d8be5c2008-04-16 14:56:12 -04002994 sata_scr_read(link, SCR_STATUS, &sstatus);
2995 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2996 /* Force 1.5 Gb/s link speed and try again */
Mark Lord8e7decd2008-05-02 02:07:51 -04002997 mv_setup_ifcfg(mv_ap_base(ap), 0);
Mark Lord0d8be5c2008-04-16 14:56:12 -04002998 if (time_after(jiffies + HZ, deadline))
2999 extra = HZ; /* only extend it once, max */
3000 }
3001 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
Mark Lord66e57a22009-01-30 18:52:58 -05003002 mv_edma_cfg(ap, 0, 0);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003003
Mark Lord17c5aab2008-04-16 14:56:51 -04003004 return rc;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003005}
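/*
 * Note on the SStatus values tested above (SStatus layout: bits 3:0
 * DET, bits 7:4 SPD, bits 11:8 IPM): 0x113 and 0x123 both mean DET=3
 * (device present, phy online) at 1.5 Gb/s and 3.0 Gb/s respectively,
 * 0x0 means nothing attached, and 0x121 (DET=1: presence sensed, no
 * communication established) is the stuck state that triggers the
 * forced 1.5 Gb/s retry after five attempts.
 */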
3006
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003007static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04003008{
Mark Lord1cfd19a2008-04-19 15:05:50 -04003009 mv_stop_edma(ap);
Mark Lordc4de5732008-05-17 13:35:21 -04003010 mv_enable_port_irqs(ap, 0);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003011}
3012
3013static void mv_eh_thaw(struct ata_port *ap)
3014{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003015 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordc4de5732008-05-17 13:35:21 -04003016 unsigned int port = ap->port_no;
3017 unsigned int hardport = mv_hardport_from_port(port);
Mark Lord1cfd19a2008-04-19 15:05:50 -04003018 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003019 void __iomem *port_mmio = mv_ap_base(ap);
Mark Lordc4de5732008-05-17 13:35:21 -04003020 u32 hc_irq_cause;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003021
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003022 /* clear EDMA errors on this port */
3023 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
3024
3025 /* clear pending irq events */
Mark Lordcae6edc2009-01-19 18:05:42 -05003026 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
Mark Lord1cfd19a2008-04-19 15:05:50 -04003027 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003028
Mark Lord88e675e2008-05-17 13:36:30 -04003029 mv_enable_port_irqs(ap, ERR_IRQ);
Brett Russ31961942005-09-30 01:36:00 -04003030}
3031
Brett Russ05b308e2005-10-05 17:08:53 -04003032/**
3033 * mv_port_init - Perform some early initialization on a single port.
3034 * @port: libata data structure storing shadow register addresses
3035 * @port_mmio: base address of the port
3036 *
3037 * Initialize shadow register mmio addresses, clear outstanding
3038 * interrupts on the port, and unmask interrupts for the future
3039 * start of the port.
3040 *
3041 * LOCKING:
3042 * Inherited from caller.
3043 */
Brett Russ31961942005-09-30 01:36:00 -04003044static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
3045{
Tejun Heo0d5ff562007-02-01 15:06:36 +09003046 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04003047 unsigned serr_ofs;
3048
Jeff Garzik8b260242005-11-12 12:32:50 -05003049 /* PIO-related setup
Brett Russ31961942005-09-30 01:36:00 -04003050 */
3051 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05003052 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04003053 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3054 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3055 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3056 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3057 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3058 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05003059 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04003060 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3061 /* special case: control/altstatus doesn't have ATA_REG_ address */
3062 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
3063
3064 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08003065 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04003066
Brett Russ31961942005-09-30 01:36:00 -04003067 /* Clear any currently outstanding port interrupt conditions */
3068 serr_ofs = mv_scr_offset(SCR_ERROR);
3069 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
3070 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
3071
Mark Lord646a4da2008-01-26 18:30:37 -05003072 /* unmask all non-transient EDMA error interrupts */
3073 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04003074
Jeff Garzik8b260242005-11-12 12:32:50 -05003075 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04003076 readl(port_mmio + EDMA_CFG_OFS),
3077 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
3078 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e2005-09-01 18:26:17 -04003079}
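/*
 * Illustrative note: the "shadow" block set up above simply mirrors
 * the legacy ATA taskfile at fixed 32-bit strides, e.g. the
 * sector-count register lands at shd_base + 4 * ATA_REG_NSECT, so
 * libata's stock SFF register helpers work unchanged.
 */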
3080
Mark Lord616d4a92008-05-02 02:08:32 -04003081static unsigned int mv_in_pcix_mode(struct ata_host *host)
3082{
3083 struct mv_host_priv *hpriv = host->private_data;
3084 void __iomem *mmio = hpriv->base;
3085 u32 reg;
3086
Mark Lord1f398472008-05-27 17:54:48 -04003087 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
Mark Lord616d4a92008-05-02 02:08:32 -04003088 return 0; /* not PCI-X capable */
3089 reg = readl(mmio + MV_PCI_MODE_OFS);
3090 if ((reg & MV_PCI_MODE_MASK) == 0)
3091 return 0; /* conventional PCI mode */
3092 return 1; /* chip is in PCI-X mode */
3093}
3094
3095static int mv_pci_cut_through_okay(struct ata_host *host)
3096{
3097 struct mv_host_priv *hpriv = host->private_data;
3098 void __iomem *mmio = hpriv->base;
3099 u32 reg;
3100
3101 if (!mv_in_pcix_mode(host)) {
3102 reg = readl(mmio + PCI_COMMAND_OFS);
3103 if (reg & PCI_COMMAND_MRDTRIG)
3104 return 0; /* not okay */
3105 }
3106 return 1; /* okay */
3107}
3108
Tejun Heo4447d352007-04-17 23:44:08 +09003109static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003110{
Tejun Heo4447d352007-04-17 23:44:08 +09003111 struct pci_dev *pdev = to_pci_dev(host->dev);
3112 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003113 u32 hp_flags = hpriv->hp_flags;
3114
Jeff Garzik5796d1c2007-10-26 00:03:37 -04003115 switch (board_idx) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003116 case chip_5080:
3117 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04003118 hp_flags |= MV_HP_GEN_I;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003119
Auke Kok44c10132007-06-08 15:46:36 -07003120 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003121 case 0x1:
3122 hp_flags |= MV_HP_ERRATA_50XXB0;
3123 break;
3124 case 0x3:
3125 hp_flags |= MV_HP_ERRATA_50XXB2;
3126 break;
3127 default:
3128 dev_printk(KERN_WARNING, &pdev->dev,
3129 "Applying 50XXB2 workarounds to unknown rev\n");
3130 hp_flags |= MV_HP_ERRATA_50XXB2;
3131 break;
3132 }
3133 break;
3134
3135 case chip_504x:
3136 case chip_508x:
3137 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04003138 hp_flags |= MV_HP_GEN_I;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003139
Auke Kok44c10132007-06-08 15:46:36 -07003140 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003141 case 0x0:
3142 hp_flags |= MV_HP_ERRATA_50XXB0;
3143 break;
3144 case 0x3:
3145 hp_flags |= MV_HP_ERRATA_50XXB2;
3146 break;
3147 default:
3148 dev_printk(KERN_WARNING, &pdev->dev,
3149 "Applying B2 workarounds to unknown rev\n");
3150 hp_flags |= MV_HP_ERRATA_50XXB2;
3151 break;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003152 }
3153 break;
3154
3155 case chip_604x:
3156 case chip_608x:
Jeff Garzik47c2b672005-11-12 21:13:17 -05003157 hpriv->ops = &mv6xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04003158 hp_flags |= MV_HP_GEN_II;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003159
Auke Kok44c10132007-06-08 15:46:36 -07003160 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003161 case 0x7:
3162 hp_flags |= MV_HP_ERRATA_60X1B2;
3163 break;
3164 case 0x9:
3165 hp_flags |= MV_HP_ERRATA_60X1C0;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003166 break;
3167 default:
3168 dev_printk(KERN_WARNING, &pdev->dev,
Jeff Garzik47c2b672005-11-12 21:13:17 -05003169 "Applying B2 workarounds to unknown rev\n");
3170 hp_flags |= MV_HP_ERRATA_60X1B2;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003171 break;
3172 }
3173 break;
3174
Jeff Garzike4e7b892006-01-31 12:18:41 -05003175 case chip_7042:
Mark Lord616d4a92008-05-02 02:08:32 -04003176 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
Mark Lord306b30f2007-12-04 14:07:52 -05003177 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3178 (pdev->device == 0x2300 || pdev->device == 0x2310))
3179 {
Mark Lord4e520032007-12-11 12:58:05 -05003180 /*
3181 * Highpoint RocketRAID PCIe 23xx series cards:
3182 *
3183 * Unconfigured drives are treated as "Legacy"
3184 * by the BIOS, and it overwrites sector 8 with
3185 * a "Lgcy" metadata block prior to Linux boot.
3186 *
3187 * Configured drives (RAID or JBOD) leave sector 8
3188 * alone, but instead overwrite a high-numbered
3189 * sector for the RAID metadata. This sector can
3190 * be determined exactly by truncating the physical
3191 * drive capacity to a nice even GB value.
3192 *
3193 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
3194 *
3195 * Warn the user, lest they think we're just buggy.
3196 */
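		/*
		 * Worked example of the arithmetic above (drive size
		 * hypothetical): a disk of 976,773,168 sectors gives
		 * 976773168 & ~0xfffff == 976224256, i.e. the capacity
		 * rounded down to a 512 MiB (0x100000-sector) boundary.
		 */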
3197 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3198 " BIOS CORRUPTS DATA on all attached drives,"
3199 " regardless of if/how they are configured."
3200 " BEWARE!\n");
3201 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3202 " use sectors 8-9 on \"Legacy\" drives,"
3203 " and avoid the final two gigabytes on"
3204 " all RocketRAID BIOS initialized drives.\n");
Mark Lord306b30f2007-12-04 14:07:52 -05003205 }
Mark Lord8e7decd2008-05-02 02:07:51 -04003206 /* fall through */
Jeff Garzike4e7b892006-01-31 12:18:41 -05003207 case chip_6042:
3208 hpriv->ops = &mv6xxx_ops;
Jeff Garzike4e7b892006-01-31 12:18:41 -05003209 hp_flags |= MV_HP_GEN_IIE;
Mark Lord616d4a92008-05-02 02:08:32 -04003210 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3211 hp_flags |= MV_HP_CUT_THROUGH;
Jeff Garzike4e7b892006-01-31 12:18:41 -05003212
Auke Kok44c10132007-06-08 15:46:36 -07003213 switch (pdev->revision) {
Mark Lord5cf73bf2008-05-27 17:58:56 -04003214 case 0x2: /* Rev.B0: the first/only public release */
Jeff Garzike4e7b892006-01-31 12:18:41 -05003215 hp_flags |= MV_HP_ERRATA_60X1C0;
3216 break;
3217 default:
3218 dev_printk(KERN_WARNING, &pdev->dev,
3219 "Applying 60X1C0 workarounds to unknown rev\n");
3220 hp_flags |= MV_HP_ERRATA_60X1C0;
3221 break;
3222 }
3223 break;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003224 case chip_soc:
3225 hpriv->ops = &mv_soc_ops;
Saeed Bisharaeb3a55a2008-08-04 00:52:55 -11003226 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3227 MV_HP_ERRATA_60X1C0;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003228 break;
Jeff Garzike4e7b892006-01-31 12:18:41 -05003229
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003230 default:
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003231 dev_printk(KERN_ERR, host->dev,
Jeff Garzik5796d1c2007-10-26 00:03:37 -04003232 "BUG: invalid board index %u\n", board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003233 return 1;
3234 }
3235
3236 hpriv->hp_flags = hp_flags;
Mark Lord02a121d2007-12-01 13:07:22 -05003237 if (hp_flags & MV_HP_PCIE) {
3238 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
3239 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
3240 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
3241 } else {
3242 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
3243 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
3244 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
3245 }
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003246
3247 return 0;
3248}
3249
Brett Russ05b308e2005-10-05 17:08:53 -04003250/**
Jeff Garzik47c2b672005-11-12 21:13:17 -05003251 * mv_init_host - Perform some early initialization of the host.
Tejun Heo4447d352007-04-17 23:44:08 +09003252 * @host: ATA host to initialize
3253 * @board_idx: controller index
Brett Russ05b308e2005-10-05 17:08:53 -04003254 *
3255 * If possible, do an early global reset of the host. Then do
3256 * our port init and clear/unmask all/relevant host interrupts.
3257 *
3258 * LOCKING:
3259 * Inherited from caller.
3260 */
Tejun Heo4447d352007-04-17 23:44:08 +09003261static int mv_init_host(struct ata_host *host, unsigned int board_idx)
Brett Russ20f733e2005-09-01 18:26:17 -04003262{
3263 int rc = 0, n_hc, port, hc;
Tejun Heo4447d352007-04-17 23:44:08 +09003264 struct mv_host_priv *hpriv = host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003265 void __iomem *mmio = hpriv->base;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003266
Tejun Heo4447d352007-04-17 23:44:08 +09003267 rc = mv_chip_id(host, board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003268 if (rc)
Mark Lord352fab72008-04-19 14:43:42 -04003269 goto done;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003270
Mark Lord1f398472008-05-27 17:54:48 -04003271 if (IS_SOC(hpriv)) {
Mark Lord7368f912008-04-25 11:24:24 -04003272 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
3273 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
Mark Lord1f398472008-05-27 17:54:48 -04003274 } else {
3275 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
3276 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003277 }
Mark Lord352fab72008-04-19 14:43:42 -04003278
Thomas Reitmayr5d0fb2e2009-01-24 20:24:58 +01003279 /* initialize shadow irq mask with register's value */
3280 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3281
Mark Lord352fab72008-04-19 14:43:42 -04003282 /* global interrupt mask: 0 == mask everything */
Mark Lordc4de5732008-05-17 13:35:21 -04003283 mv_set_main_irq_mask(host, ~0, 0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003284
Tejun Heo4447d352007-04-17 23:44:08 +09003285 n_hc = mv_get_hc_count(host->ports[0]->flags);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003286
Tejun Heo4447d352007-04-17 23:44:08 +09003287 for (port = 0; port < host->n_ports; port++)
Jeff Garzik47c2b672005-11-12 21:13:17 -05003288 hpriv->ops->read_preamp(hpriv, port, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04003289
Jeff Garzikc9d39132005-11-13 17:47:51 -05003290 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
Jeff Garzik47c2b672005-11-12 21:13:17 -05003291 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04003292 goto done;
Brett Russ20f733e2005-09-01 18:26:17 -04003293
Jeff Garzik522479f2005-11-12 22:14:02 -05003294 hpriv->ops->reset_flash(hpriv, mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003295 hpriv->ops->reset_bus(host, mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -05003296 hpriv->ops->enable_leds(hpriv, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04003297
Tejun Heo4447d352007-04-17 23:44:08 +09003298 for (port = 0; port < host->n_ports; port++) {
Tejun Heocbcdd872007-08-18 13:14:55 +09003299 struct ata_port *ap = host->ports[port];
Jeff Garzik2a47ce02005-11-12 23:05:14 -05003300 void __iomem *port_mmio = mv_port_base(mmio, port);
Tejun Heocbcdd872007-08-18 13:14:55 +09003301
3302 mv_port_init(&ap->ioaddr, port_mmio);
3303
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003304#ifdef CONFIG_PCI
Mark Lord1f398472008-05-27 17:54:48 -04003305 if (!IS_SOC(hpriv)) {
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003306 unsigned int offset = port_mmio - mmio;
3307 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
3308 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
3309 }
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003310#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003311 }
3312
3313 for (hc = 0; hc < n_hc; hc++) {
Brett Russ31961942005-09-30 01:36:00 -04003314 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3315
3316 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3317 "(before clear)=0x%08x\n", hc,
3318 readl(hc_mmio + HC_CFG_OFS),
3319 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
3320
3321 /* Clear any currently outstanding hc interrupt conditions */
3322 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04003323 }
3324
Mark Lord6be96ac2009-02-19 10:38:04 -05003325 /* Clear any currently outstanding host interrupt conditions */
3326 writelfl(0, mmio + hpriv->irq_cause_ofs);
Brett Russ31961942005-09-30 01:36:00 -04003327
Mark Lord6be96ac2009-02-19 10:38:04 -05003328 /* and unmask interrupt generation for host regs */
3329 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
Jeff Garzikfb621e22007-02-25 04:19:45 -05003330
Mark Lord6be96ac2009-02-19 10:38:04 -05003331 /*
3332 * enable only global host interrupts for now.
3333 * The per-port interrupts get done later as ports are set up.
3334 */
3335 mv_set_main_irq_mask(host, 0, PCI_ERR);
Brett Russ31961942005-09-30 01:36:00 -04003336done:
Brett Russ20f733e2005-09-01 18:26:17 -04003337 return rc;
3338}
3339
Byron Bradleyfbf14e22008-02-10 21:17:30 +00003340static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3341{
3342 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3343 MV_CRQB_Q_SZ, 0);
3344 if (!hpriv->crqb_pool)
3345 return -ENOMEM;
3346
3347 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3348 MV_CRPB_Q_SZ, 0);
3349 if (!hpriv->crpb_pool)
3350 return -ENOMEM;
3351
3352 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3353 MV_SG_TBL_SZ, 0);
3354 if (!hpriv->sg_tbl_pool)
3355 return -ENOMEM;
3356
3357 return 0;
3358}
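/*
 * Illustrative sketch (error handling omitted): the dmam_*-created
 * pools above are devres-managed, so they are released automatically
 * on driver detach; allocating a command block from one looks like:
 *
 *	dma_addr_t dma;
 *	void *crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &dma);
 */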
3359
Lennert Buytenhek15a32632008-03-27 14:51:39 -04003360static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
3361 struct mbus_dram_target_info *dram)
3362{
3363 int i;
3364
3365 for (i = 0; i < 4; i++) {
3366 writel(0, hpriv->base + WINDOW_CTRL(i));
3367 writel(0, hpriv->base + WINDOW_BASE(i));
3368 }
3369
3370 for (i = 0; i < dram->num_cs; i++) {
3371 struct mbus_dram_window *cs = dram->cs + i;
3372
3373 writel(((cs->size - 1) & 0xffff0000) |
3374 (cs->mbus_attr << 8) |
3375 (dram->mbus_dram_target_id << 4) | 1,
3376 hpriv->base + WINDOW_CTRL(i));
3377 writel(cs->base, hpriv->base + WINDOW_BASE(i));
3378 }
3379}
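/*
 * Worked example of the window encoding above (values hypothetical):
 * a 256 MiB chip-select gives (0x10000000 - 1) & 0xffff0000 ==
 * 0x0fff0000, which is then OR'd with the attribute (<< 8), the
 * target id (<< 4), and bit 0 to mark the window valid.
 */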
3380
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003381/**
3382 * mv_platform_probe - handle a positive probe of an SoC Marvell
3383 * host
3384 * @pdev: platform device found
3385 *
3386 * LOCKING:
3387 * Inherited from caller.
3388 */
3389static int mv_platform_probe(struct platform_device *pdev)
3390{
3391 static int printed_version;
3392 const struct mv_sata_platform_data *mv_platform_data;
3393 const struct ata_port_info *ppi[] =
3394 { &mv_port_info[chip_soc], NULL };
3395 struct ata_host *host;
3396 struct mv_host_priv *hpriv;
3397 struct resource *res;
3398 int n_ports, rc;
3399
3400 if (!printed_version++)
3401 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3402
3403 /*
3404 * Simple resource validation.
3405 */
3406 if (unlikely(pdev->num_resources != 2)) {
3407 dev_err(&pdev->dev, "invalid number of resources\n");
3408 return -EINVAL;
3409 }
3410
3411 /*
3412 * Get the register base first
3413 */
3414 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3415 if (res == NULL)
3416 return -EINVAL;
3417
3418 /* allocate host */
3419 mv_platform_data = pdev->dev.platform_data;
3420 n_ports = mv_platform_data->n_ports;
3421
3422 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3423 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3424
3425 if (!host || !hpriv)
3426 return -ENOMEM;
3427 host->private_data = hpriv;
3428 hpriv->n_ports = n_ports;
3429
3430 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11003431 hpriv->base = devm_ioremap(&pdev->dev, res->start,
3432 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003433 hpriv->base -= MV_SATAHC0_REG_BASE;
3434
Lennert Buytenhek15a32632008-03-27 14:51:39 -04003435 /*
3436 * (Re-)program MBUS remapping windows if we are asked to.
3437 */
3438 if (mv_platform_data->dram != NULL)
3439 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
3440
Byron Bradleyfbf14e22008-02-10 21:17:30 +00003441 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3442 if (rc)
3443 return rc;
3444
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003445 /* initialize adapter */
3446 rc = mv_init_host(host, chip_soc);
3447 if (rc)
3448 return rc;
3449
3450 dev_printk(KERN_INFO, &pdev->dev,
3451 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
3452 host->n_ports);
3453
3454 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
3455 IRQF_SHARED, &mv6_sht);
3456}
3457
3458/**
3460 * mv_platform_remove - unplug a platform interface
3461 * @pdev: platform device
3462 *
3463 * A platform bus SATA device has been unplugged. Perform the needed
3464 * cleanup. Also called on module unload for any active devices.
3465 */
3466static int __devexit mv_platform_remove(struct platform_device *pdev)
3467{
3468 struct device *dev = &pdev->dev;
3469 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003470
3471 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003472 return 0;
3473}
3474
3475static struct platform_driver mv_platform_driver = {
3476 .probe = mv_platform_probe,
3477 .remove = __devexit_p(mv_platform_remove),
3478 .driver = {
3479 .name = DRV_NAME,
3480 .owner = THIS_MODULE,
3481 },
3482};
3483
3484
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003485#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003486static int mv_pci_init_one(struct pci_dev *pdev,
3487 const struct pci_device_id *ent);
3488
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003489
3490static struct pci_driver mv_pci_driver = {
3491 .name = DRV_NAME,
3492 .id_table = mv_pci_tbl,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003493 .probe = mv_pci_init_one,
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003494 .remove = ata_pci_remove_one,
3495};
3496
3497/*
3498 * module options
3499 */
3500static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
3501
3502
3503/* move to PCI layer or libata core? */
3504static int pci_go_64(struct pci_dev *pdev)
3505{
3506 int rc;
3507
3508 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3509 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3510 if (rc) {
3511 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3512 if (rc) {
3513 dev_printk(KERN_ERR, &pdev->dev,
3514 "64-bit DMA enable failed\n");
3515 return rc;
3516 }
3517 }
3518 } else {
3519 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3520 if (rc) {
3521 dev_printk(KERN_ERR, &pdev->dev,
3522 "32-bit DMA enable failed\n");
3523 return rc;
3524 }
3525 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3526 if (rc) {
3527 dev_printk(KERN_ERR, &pdev->dev,
3528 "32-bit consistent DMA enable failed\n");
3529 return rc;
3530 }
3531 }
3532
3533 return rc;
3534}
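/*
 * Illustrative sketch (assumes a much newer kernel than this code):
 * the same 64-bit-with-32-bit-fallback policy is expressed today via
 * the unified DMA API:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 */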
3535
Brett Russ05b308e2005-10-05 17:08:53 -04003536/**
3537 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09003538 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04003539 *
3540 * FIXME: complete this.
3541 *
3542 * LOCKING:
3543 * Inherited from caller.
3544 */
Tejun Heo4447d352007-04-17 23:44:08 +09003545static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04003546{
Tejun Heo4447d352007-04-17 23:44:08 +09003547 struct pci_dev *pdev = to_pci_dev(host->dev);
3548 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07003549 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003550 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04003551
3552 /* Read the PCI class code so we can report whether the
3553 * chip presents itself as a SCSI or a RAID controller.
3554 */
Brett Russ31961942005-09-30 01:36:00 -04003555 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3556 if (scc == 0)
3557 scc_s = "SCSI";
3558 else if (scc == 0x01)
3559 scc_s = "RAID";
3560 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003561 scc_s = "?";
3562
3563 if (IS_GEN_I(hpriv))
3564 gen = "I";
3565 else if (IS_GEN_II(hpriv))
3566 gen = "II";
3567 else if (IS_GEN_IIE(hpriv))
3568 gen = "IIE";
3569 else
3570 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04003571
Jeff Garzika9524a72005-10-30 14:39:11 -05003572 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003573 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3574 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04003575 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3576}
3577
Brett Russ05b308e2005-10-05 17:08:53 -04003578/**
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003579 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
Brett Russ05b308e2005-10-05 17:08:53 -04003580 * @pdev: PCI device found
3581 * @ent: PCI device ID entry for the matched host
3582 *
3583 * LOCKING:
3584 * Inherited from caller.
3585 */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003586static int mv_pci_init_one(struct pci_dev *pdev,
3587 const struct pci_device_id *ent)
Brett Russ20f733e2005-09-01 18:26:17 -04003588{
Jeff Garzik2dcb4072007-10-19 06:42:56 -04003589 static int printed_version;
Brett Russ20f733e2005-09-01 18:26:17 -04003590 unsigned int board_idx = (unsigned int)ent->driver_data;
Tejun Heo4447d352007-04-17 23:44:08 +09003591 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3592 struct ata_host *host;
3593 struct mv_host_priv *hpriv;
3594 int n_ports, rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003595
Jeff Garzika9524a72005-10-30 14:39:11 -05003596 if (!printed_version++)
3597 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
Brett Russ20f733e2005-09-01 18:26:17 -04003598
Tejun Heo4447d352007-04-17 23:44:08 +09003599 /* allocate host */
3600 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3601
3602 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3603 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3604 if (!host || !hpriv)
3605 return -ENOMEM;
3606 host->private_data = hpriv;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003607 hpriv->n_ports = n_ports;
Tejun Heo4447d352007-04-17 23:44:08 +09003608
3609 /* acquire resources */
Tejun Heo24dc5f32007-01-20 16:00:28 +09003610 rc = pcim_enable_device(pdev);
3611 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04003612 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003613
Tejun Heo0d5ff562007-02-01 15:06:36 +09003614 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3615 if (rc == -EBUSY)
Tejun Heo24dc5f32007-01-20 16:00:28 +09003616 pcim_pin_device(pdev);
Tejun Heo0d5ff562007-02-01 15:06:36 +09003617 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09003618 return rc;
Tejun Heo4447d352007-04-17 23:44:08 +09003619 host->iomap = pcim_iomap_table(pdev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003620 hpriv->base = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04003621
Jeff Garzikd88184f2007-02-26 01:26:06 -05003622 rc = pci_go_64(pdev);
3623 if (rc)
3624 return rc;
3625
Mark Lordda2fa9b2008-01-26 18:32:45 -05003626 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3627 if (rc)
3628 return rc;
3629
Brett Russ20f733e2005-09-01 18:26:17 -04003630 /* initialize adapter */
Tejun Heo4447d352007-04-17 23:44:08 +09003631 rc = mv_init_host(host, board_idx);
Tejun Heo24dc5f32007-01-20 16:00:28 +09003632 if (rc)
3633 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003634
Mark Lord6d3c30e2009-01-21 10:31:29 -05003635 /* Enable message-signaled interrupts, if requested */
3636 if (msi && pci_enable_msi(pdev) == 0)
3637 hpriv->hp_flags |= MV_HP_FLAG_MSI;
Brett Russ20f733e2005-09-01 18:26:17 -04003638
Brett Russ31961942005-09-30 01:36:00 -04003639 mv_dump_pci_cfg(pdev, 0x68);
Tejun Heo4447d352007-04-17 23:44:08 +09003640 mv_print_info(host);
Brett Russ20f733e2005-09-01 18:26:17 -04003641
Tejun Heo4447d352007-04-17 23:44:08 +09003642 pci_set_master(pdev);
Jeff Garzikea8b4db2007-07-17 02:21:50 -04003643 pci_try_set_mwi(pdev);
Tejun Heo4447d352007-04-17 23:44:08 +09003644 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
Jeff Garzikc5d3e452007-07-11 18:30:50 -04003645 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
Brett Russ20f733e2005-09-01 18:26:17 -04003646}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003647#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003648
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003649static int mv_platform_probe(struct platform_device *pdev);
3650static int __devexit mv_platform_remove(struct platform_device *pdev);
3651
Brett Russ20f733e2005-09-01 18:26:17 -04003652static int __init mv_init(void)
3653{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003654 int rc = -ENODEV;
3655#ifdef CONFIG_PCI
3656 rc = pci_register_driver(&mv_pci_driver);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003657 if (rc < 0)
3658 return rc;
3659#endif
3660 rc = platform_driver_register(&mv_platform_driver);
3661
3662#ifdef CONFIG_PCI
3663 if (rc < 0)
3664 pci_unregister_driver(&mv_pci_driver);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003665#endif
3666 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003667}
3668
3669static void __exit mv_exit(void)
3670{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003671#ifdef CONFIG_PCI
Brett Russ20f733e2005-09-01 18:26:17 -04003672 pci_unregister_driver(&mv_pci_driver);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003673#endif
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003674 platform_driver_unregister(&mv_platform_driver);
Brett Russ20f733e2005-09-01 18:26:17 -04003675}
3676
3677MODULE_AUTHOR("Brett Russ");
3678MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3679MODULE_LICENSE("GPL");
3680MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3681MODULE_VERSION(DRV_VERSION);
Mark Lord17c5aab2008-04-16 14:56:51 -04003682MODULE_ALIAS("platform:" DRV_NAME);
Brett Russ20f733e2005-09-01 18:26:17 -04003683
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003684#ifdef CONFIG_PCI
Jeff Garzikddef9bb2006-02-02 16:17:06 -05003685module_param(msi, int, 0444);
3686MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
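/*
 * Example (hypothetical invocation): load with MSI enabled via
 * "modprobe sata_mv msi=1"; the default (msi=0) uses legacy INTx.
 */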
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003687#endif
Jeff Garzikddef9bb2006-02-02 16:17:06 -05003688
Brett Russ20f733e2005-09-01 18:26:17 -04003689module_init(mv_init);
3690module_exit(mv_exit);