blob: 162260d6fe197357161cc99378891dbd460d3bf9 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Mark Lorde12bef52008-03-31 19:33:56 -04004 * Copyright 2008: Marvell Corporation, all rights reserved.
Jeff Garzik8b260242005-11-12 12:32:50 -05005 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05006 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04007 *
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
Jeff Garzik4a05e202007-05-24 23:40:15 -040025/*
26 sata_mv TODO list:
27
28 1) Needs a full errata audit for all chipsets. I implemented most
29 of the errata workarounds found in the Marvell vendor driver, but
30 I distinctly remember a couple workarounds (one related to PCI-X)
31 are still needed.
32
Mark Lord1fd2e1c2008-01-26 18:33:59 -050033 2) Improve/fix IRQ and error handling sequences.
34
35 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36
37 4) Think about TCQ support here, and for libata in general
38 with controllers that suppport it via host-queuing hardware
39 (a software-only implementation could be a nightmare).
Jeff Garzik4a05e202007-05-24 23:40:15 -040040
41 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42
43 6) Add port multiplier support (intermediate)
44
Mark Lord40f0bc22008-04-16 14:57:25 -040045 7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
46
Jeff Garzik4a05e202007-05-24 23:40:15 -040047 8) Develop a low-power-consumption strategy, and implement it.
48
49 9) [Experiment, low priority] See if ATAPI can be supported using
50 "unknown FIS" or "vendor-specific FIS" support, or something creative
51 like that.
52
53 10) [Experiment, low priority] Investigate interrupt coalescing.
54 Quite often, especially with PCI Message Signalled Interrupts (MSI),
55 the overhead reduced by interrupt mitigation is quite often not
56 worth the latency cost.
57
58 11) [Experiment, Marvell value added] Is it possible to use target
59 mode to cross-connect two Linux boxes with Marvell cards? If so,
60 creating LibATA target mode support would be very interesting.
61
62 Target mode, for those without docs, is the ability to directly
63 connect two SATA controllers.
64
Jeff Garzik4a05e202007-05-24 23:40:15 -040065*/
66
Brett Russ20f733e2005-09-01 18:26:17 -040067#include <linux/kernel.h>
68#include <linux/module.h>
69#include <linux/pci.h>
70#include <linux/init.h>
71#include <linux/blkdev.h>
72#include <linux/delay.h>
73#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080074#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040075#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050076#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050077#include <linux/platform_device.h>
78#include <linux/ata_platform.h>
Brett Russ20f733e2005-09-01 18:26:17 -040079#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050080#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040081#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040082#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040083
84#define DRV_NAME "sata_mv"
Mark Lord1fd2e1c2008-01-26 18:33:59 -050085#define DRV_VERSION "1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040086
87enum {
88 /* BAR's are enumerated in terms of pci_resource_start() terms */
89 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
90 MV_IO_BAR = 2, /* offset 0x18: IO space */
91 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
92
93 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
94 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
95
96 MV_PCI_REG_BASE = 0,
97 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040098 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
99 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
100 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
101 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
102 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
103
Brett Russ20f733e2005-09-01 18:26:17 -0400104 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500105 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500106 MV_GPIO_PORT_CTL = 0x104f0,
107 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400108
109 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
110 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
111 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
112 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
113
Brett Russ31961942005-09-30 01:36:00 -0400114 MV_MAX_Q_DEPTH = 32,
115 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
116
117 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
118 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400119 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
120 */
121 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
122 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500123 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400124 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400125
Brett Russ20f733e2005-09-01 18:26:17 -0400126 MV_PORTS_PER_HC = 4,
127 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
128 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400129 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400130 MV_PORT_MASK = 3,
131
132 /* Host Flags */
133 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
134 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100135 /* SoC integrated controllers, no PCI interface */
Mark Lorde12bef52008-03-31 19:33:56 -0400136 MV_FLAG_SOC = (1 << 28),
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100137
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400138 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400139 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
140 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500141 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400142
Brett Russ31961942005-09-30 01:36:00 -0400143 CRQB_FLAG_READ = (1 << 0),
144 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400145 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
Mark Lorde12bef52008-03-31 19:33:56 -0400146 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400147 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400148 CRQB_CMD_ADDR_SHIFT = 8,
149 CRQB_CMD_CS = (0x2 << 11),
150 CRQB_CMD_LAST = (1 << 15),
151
152 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400153 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
154 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400155
156 EPRD_FLAG_END_OF_TBL = (1 << 31),
157
Brett Russ20f733e2005-09-01 18:26:17 -0400158 /* PCI interface registers */
159
Brett Russ31961942005-09-30 01:36:00 -0400160 PCI_COMMAND_OFS = 0xc00,
161
Brett Russ20f733e2005-09-01 18:26:17 -0400162 PCI_MAIN_CMD_STS_OFS = 0xd30,
163 STOP_PCI_MASTER = (1 << 2),
164 PCI_MASTER_EMPTY = (1 << 3),
165 GLOB_SFT_RST = (1 << 4),
166
Jeff Garzik522479f2005-11-12 22:14:02 -0500167 MV_PCI_MODE = 0xd00,
168 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
169 MV_PCI_DISC_TIMER = 0xd04,
170 MV_PCI_MSI_TRIGGER = 0xc38,
171 MV_PCI_SERR_MASK = 0xc28,
172 MV_PCI_XBAR_TMOUT = 0x1d04,
173 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
174 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
175 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
176 MV_PCI_ERR_COMMAND = 0x1d50,
177
Mark Lord02a121d2007-12-01 13:07:22 -0500178 PCI_IRQ_CAUSE_OFS = 0x1d58,
179 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400180 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
181
Mark Lord02a121d2007-12-01 13:07:22 -0500182 PCIE_IRQ_CAUSE_OFS = 0x1900,
183 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500184 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500185
Brett Russ20f733e2005-09-01 18:26:17 -0400186 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
187 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500188 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
189 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
Brett Russ20f733e2005-09-01 18:26:17 -0400190 PORT0_ERR = (1 << 0), /* shift by port # */
191 PORT0_DONE = (1 << 1), /* shift by port # */
192 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
193 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
194 PCI_ERR = (1 << 18),
195 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
196 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500197 PORTS_0_3_COAL_DONE = (1 << 8),
198 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400199 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
200 GPIO_INT = (1 << 22),
201 SELF_INT = (1 << 23),
202 TWSI_INT = (1 << 24),
203 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500204 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Mark Lorde12bef52008-03-31 19:33:56 -0400205 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500206 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400207 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
208 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500209 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
210 HC_MAIN_RSVD_5),
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500211 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
Brett Russ20f733e2005-09-01 18:26:17 -0400212
213 /* SATAHC registers */
214 HC_CFG_OFS = 0,
215
216 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400217 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400218 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
219 DEV_IRQ = (1 << 8), /* shift by port # */
220
221 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400222 SHD_BLK_OFS = 0x100,
223 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400224
225 /* SATA registers */
226 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
227 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500228 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Mark Lord17c5aab2008-04-16 14:56:51 -0400229
Mark Lorde12bef52008-03-31 19:33:56 -0400230 LTMODE_OFS = 0x30c,
Mark Lord17c5aab2008-04-16 14:56:51 -0400231 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
232
Jeff Garzik47c2b672005-11-12 21:13:17 -0500233 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500234 PHY_MODE4 = 0x314,
235 PHY_MODE2 = 0x330,
Mark Lorde12bef52008-03-31 19:33:56 -0400236 SATA_IFCTL_OFS = 0x344,
237 SATA_IFSTAT_OFS = 0x34c,
238 VENDOR_UNIQUE_FIS_OFS = 0x35c,
Mark Lord17c5aab2008-04-16 14:56:51 -0400239
Mark Lorde12bef52008-03-31 19:33:56 -0400240 FIS_CFG_OFS = 0x360,
Mark Lord17c5aab2008-04-16 14:56:51 -0400241 FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
242
Jeff Garzikc9d39132005-11-13 17:47:51 -0500243 MV5_PHY_MODE = 0x74,
244 MV5_LT_MODE = 0x30,
245 MV5_PHY_CTL = 0x0C,
Mark Lorde12bef52008-03-31 19:33:56 -0400246 SATA_INTERFACE_CFG = 0x050,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500247
248 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400249
250 /* Port registers */
251 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500252 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
253 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
254 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
255 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
256 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Mark Lorde12bef52008-03-31 19:33:56 -0400257 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
258 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
Brett Russ20f733e2005-09-01 18:26:17 -0400259
260 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
261 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400262 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
263 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
264 EDMA_ERR_DEV = (1 << 2), /* device error */
265 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
266 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
267 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400268 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
269 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400270 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400271 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400272 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
273 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
274 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
275 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500276
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400277 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500278 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
279 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
280 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
281 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
282
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400283 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500284
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400285 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500286 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
287 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
288 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
289 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
290 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
291
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400292 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500293
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400294 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400295 EDMA_ERR_OVERRUN_5 = (1 << 5),
296 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500297
298 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
299 EDMA_ERR_LNK_CTRL_RX_1 |
300 EDMA_ERR_LNK_CTRL_RX_3 |
Mark Lord40f0bc22008-04-16 14:57:25 -0400301 EDMA_ERR_LNK_CTRL_TX |
302 /* temporary, until we fix hotplug: */
303 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
Mark Lord646a4da2008-01-26 18:30:37 -0500304
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400305 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
306 EDMA_ERR_PRD_PAR |
307 EDMA_ERR_DEV_DCON |
308 EDMA_ERR_DEV_CON |
309 EDMA_ERR_SERR |
310 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400311 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400312 EDMA_ERR_CRPB_PAR |
313 EDMA_ERR_INTRL_PAR |
314 EDMA_ERR_IORDY |
315 EDMA_ERR_LNK_CTRL_RX_2 |
316 EDMA_ERR_LNK_DATA_RX |
317 EDMA_ERR_LNK_DATA_TX |
318 EDMA_ERR_TRANS_PROTO,
Mark Lorde12bef52008-03-31 19:33:56 -0400319
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400320 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
321 EDMA_ERR_PRD_PAR |
322 EDMA_ERR_DEV_DCON |
323 EDMA_ERR_DEV_CON |
324 EDMA_ERR_OVERRUN_5 |
325 EDMA_ERR_UNDERRUN_5 |
326 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400327 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400328 EDMA_ERR_CRPB_PAR |
329 EDMA_ERR_INTRL_PAR |
330 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400331
Brett Russ31961942005-09-30 01:36:00 -0400332 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
333 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400334
335 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
336 EDMA_REQ_Q_PTR_SHIFT = 5,
337
338 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
339 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
340 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400341 EDMA_RSP_Q_PTR_SHIFT = 3,
342
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400343 EDMA_CMD_OFS = 0x28, /* EDMA command register */
344 EDMA_EN = (1 << 0), /* enable EDMA */
345 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
346 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400347
Jeff Garzikc9d39132005-11-13 17:47:51 -0500348 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500349 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500350
Brett Russ31961942005-09-30 01:36:00 -0400351 /* Host private flags (hp_flags) */
352 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500353 MV_HP_ERRATA_50XXB0 = (1 << 1),
354 MV_HP_ERRATA_50XXB2 = (1 << 2),
355 MV_HP_ERRATA_60X1B2 = (1 << 3),
356 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500357 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400358 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
359 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
360 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500361 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400362
Brett Russ31961942005-09-30 01:36:00 -0400363 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400364 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500365 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Brett Russ31961942005-09-30 01:36:00 -0400366};
367
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400368#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
369#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500370#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100371#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500372
Jeff Garzik095fec82005-11-12 09:50:49 -0500373enum {
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400374 /* DMA boundary 0xffff is required by the s/g splitting
375 * we need on /length/ in mv_fill-sg().
376 */
377 MV_DMA_BOUNDARY = 0xffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500378
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400379 /* mask of register bits containing lower 32 bits
380 * of EDMA request queue DMA address
381 */
Jeff Garzik095fec82005-11-12 09:50:49 -0500382 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
383
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400384 /* ditto, for response queue */
Jeff Garzik095fec82005-11-12 09:50:49 -0500385 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
386};
387
Jeff Garzik522479f2005-11-12 22:14:02 -0500388enum chip_type {
389 chip_504x,
390 chip_508x,
391 chip_5080,
392 chip_604x,
393 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500394 chip_6042,
395 chip_7042,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500396 chip_soc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500397};
398
Brett Russ31961942005-09-30 01:36:00 -0400399/* Command ReQuest Block: 32B */
400struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400401 __le32 sg_addr;
402 __le32 sg_addr_hi;
403 __le16 ctrl_flags;
404 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400405};
406
Jeff Garzike4e7b892006-01-31 12:18:41 -0500407struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400408 __le32 addr;
409 __le32 addr_hi;
410 __le32 flags;
411 __le32 len;
412 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500413};
414
Brett Russ31961942005-09-30 01:36:00 -0400415/* Command ResPonse Block: 8B */
416struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400417 __le16 id;
418 __le16 flags;
419 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400420};
421
422/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
423struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400424 __le32 addr;
425 __le32 flags_size;
426 __le32 addr_hi;
427 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400428};
429
430struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400431 struct mv_crqb *crqb;
432 dma_addr_t crqb_dma;
433 struct mv_crpb *crpb;
434 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500435 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
436 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400437
438 unsigned int req_idx;
439 unsigned int resp_idx;
440
Brett Russ31961942005-09-30 01:36:00 -0400441 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400442};
443
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500444struct mv_port_signal {
445 u32 amps;
446 u32 pre;
447};
448
Mark Lord02a121d2007-12-01 13:07:22 -0500449struct mv_host_priv {
450 u32 hp_flags;
451 struct mv_port_signal signal[8];
452 const struct mv_hw_ops *ops;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500453 int n_ports;
454 void __iomem *base;
455 void __iomem *main_cause_reg_addr;
456 void __iomem *main_mask_reg_addr;
Mark Lord02a121d2007-12-01 13:07:22 -0500457 u32 irq_cause_ofs;
458 u32 irq_mask_ofs;
459 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500460 /*
461 * These consistent DMA memory pools give us guaranteed
462 * alignment for hardware-accessed data structures,
463 * and less memory waste in accomplishing the alignment.
464 */
465 struct dma_pool *crqb_pool;
466 struct dma_pool *crpb_pool;
467 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500468};
469
Jeff Garzik47c2b672005-11-12 21:13:17 -0500470struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500471 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
472 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500473 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
474 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
475 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500476 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
477 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500478 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100479 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500480};
481
Tejun Heoda3dbb12007-07-16 14:29:40 +0900482static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
483static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
484static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
485static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400486static int mv_port_start(struct ata_port *ap);
487static void mv_port_stop(struct ata_port *ap);
488static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500489static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900490static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Tejun Heoa1efdab2008-03-25 12:22:50 +0900491static int mv_hardreset(struct ata_link *link, unsigned int *class,
492 unsigned long deadline);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400493static void mv_eh_freeze(struct ata_port *ap);
494static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500495static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400496
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500497static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
498 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500499static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
500static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
501 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500502static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
503 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500504static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100505static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500506
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500507static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
508 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500509static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
510static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
511 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500512static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
513 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500514static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500515static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
516 void __iomem *mmio);
517static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
518 void __iomem *mmio);
519static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
520 void __iomem *mmio, unsigned int n_hc);
521static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
522 void __iomem *mmio);
523static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100524static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
Mark Lorde12bef52008-03-31 19:33:56 -0400525static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500526 unsigned int port_no);
Mark Lorde12bef52008-03-31 19:33:56 -0400527static int mv_stop_edma(struct ata_port *ap);
Mark Lordb5624682008-03-31 19:34:40 -0400528static int mv_stop_edma_engine(void __iomem *port_mmio);
Mark Lorde12bef52008-03-31 19:33:56 -0400529static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500530
Mark Lordeb73d552008-01-29 13:24:00 -0500531/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
532 * because we have to allow room for worst case splitting of
533 * PRDs for 64K boundaries in mv_fill_sg().
534 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400535static struct scsi_host_template mv5_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900536 ATA_BASE_SHT(DRV_NAME),
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400537 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400538 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400539};
540
541static struct scsi_host_template mv6_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900542 ATA_NCQ_SHT(DRV_NAME),
Mark Lord138bfdd2008-01-26 18:33:18 -0500543 .can_queue = MV_MAX_Q_DEPTH - 1,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400544 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400545 .dma_boundary = MV_DMA_BOUNDARY,
Brett Russ20f733e2005-09-01 18:26:17 -0400546};
547
Tejun Heo029cfd62008-03-25 12:22:49 +0900548static struct ata_port_operations mv5_ops = {
549 .inherits = &ata_sff_port_ops,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500550
Jeff Garzikc9d39132005-11-13 17:47:51 -0500551 .qc_prep = mv_qc_prep,
552 .qc_issue = mv_qc_issue,
553
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400554 .freeze = mv_eh_freeze,
555 .thaw = mv_eh_thaw,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900556 .hardreset = mv_hardreset,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900557 .error_handler = ata_std_error_handler, /* avoid SFF EH */
Tejun Heo029cfd62008-03-25 12:22:49 +0900558 .post_internal_cmd = ATA_OP_NULL,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400559
Jeff Garzikc9d39132005-11-13 17:47:51 -0500560 .scr_read = mv5_scr_read,
561 .scr_write = mv5_scr_write,
562
563 .port_start = mv_port_start,
564 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500565};
566
Tejun Heo029cfd62008-03-25 12:22:49 +0900567static struct ata_port_operations mv6_ops = {
568 .inherits = &mv5_ops,
Mark Lord138bfdd2008-01-26 18:33:18 -0500569 .qc_defer = ata_std_qc_defer,
Tejun Heo029cfd62008-03-25 12:22:49 +0900570 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400571 .scr_read = mv_scr_read,
572 .scr_write = mv_scr_write,
Brett Russ20f733e2005-09-01 18:26:17 -0400573};
574
Tejun Heo029cfd62008-03-25 12:22:49 +0900575static struct ata_port_operations mv_iie_ops = {
576 .inherits = &mv6_ops,
577 .dev_config = ATA_OP_NULL,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500578 .qc_prep = mv_qc_prep_iie,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500579};
580
/*
 * Per-board-variant port capabilities.  Indexed by the chip_* enum values
 * that mv_pci_tbl maps PCI device IDs onto, so entry order must match
 * that enum exactly.  All variants run pio0-4 and udma6; they differ in
 * flags (NCQ, dual host controller, SoC) and in which port_ops they use.
 */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
635
/*
 * PCI IDs supported by this driver.  The second field of each entry is
 * the chip_* board index into mv_port_info[].  NOTE(review): entry order
 * matters for PCI matching — the earlier TTI entries intentionally claim
 * RocketRAID 1740/174x before the later 7042-based TTI boards.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
665
/* Low-level hardware hooks for Gen-I (50xx) chips. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata	= mv5_phy_errata,
	.enable_leds	= mv5_enable_leds,
	.read_preamp	= mv5_read_preamp,
	.reset_hc	= mv5_reset_hc,
	.reset_flash	= mv5_reset_flash,
	.reset_bus	= mv5_reset_bus,
};
674
/* Low-level hardware hooks for Gen-II/IIE (60xx/7042) chips. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata	= mv6_phy_errata,
	.enable_leds	= mv6_enable_leds,
	.read_preamp	= mv6_read_preamp,
	.reset_hc	= mv6_reset_hc,
	.reset_flash	= mv6_reset_flash,
	.reset_bus	= mv_reset_pci_bus,
};
683
/*
 * Low-level hardware hooks for the system-on-chip (non-PCI) integration.
 * PHY handling is shared with Gen-II; reset/LED paths are SoC-specific.
 */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata	= mv6_phy_errata,
	.enable_leds	= mv_soc_enable_leds,
	.read_preamp	= mv_soc_read_preamp,
	.reset_hc	= mv_soc_reset_hc,
	.reset_flash	= mv_soc_reset_flash,
	.reset_bus	= mv_soc_reset_bus,
};
692
Brett Russ20f733e2005-09-01 18:26:17 -0400693/*
694 * Functions
695 */
696
/*
 * writelfl - MMIO register write followed by a read-back flush.
 * The dummy readl() of the same address forces any posted PCI write to
 * actually reach the device before the caller continues.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
702
Brett Russ20f733e2005-09-01 18:26:17 -0400703static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
704{
705 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
706}
707
Jeff Garzikc9d39132005-11-13 17:47:51 -0500708static inline unsigned int mv_hc_from_port(unsigned int port)
709{
710 return port >> MV_PORT_HC_SHIFT;
711}
712
713static inline unsigned int mv_hardport_from_port(unsigned int port)
714{
715 return port & MV_PORT_MASK;
716}
717
718static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
719 unsigned int port)
720{
721 return mv_hc_base(base, mv_hc_from_port(port));
722}
723
Brett Russ20f733e2005-09-01 18:26:17 -0400724static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
725{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500726 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500727 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500728 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400729}
730
Mark Lorde12bef52008-03-31 19:33:56 -0400731static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
732{
733 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
734 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
735
736 return hc_mmio + ofs;
737}
738
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500739static inline void __iomem *mv_host_base(struct ata_host *host)
740{
741 struct mv_host_priv *hpriv = host->private_data;
742 return hpriv->base;
743}
744
Brett Russ20f733e2005-09-01 18:26:17 -0400745static inline void __iomem *mv_ap_base(struct ata_port *ap)
746{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500747 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400748}
749
Jeff Garzikcca39742006-08-24 03:19:22 -0400750static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400751{
Jeff Garzikcca39742006-08-24 03:19:22 -0400752 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400753}
754
/*
 * mv_set_edma_ptrs - program the EDMA request/response queue registers
 * @port_mmio: port register base
 * @hpriv: host private data (for errata flags)
 * @pp: port private data holding the queue DMA addresses and indices
 *
 * Writes the CRQB/CRPB queue base addresses and in/out pointers so the
 * hardware queues match the software req_idx/resp_idx.  On chips with
 * the XX42A0 errata the out (req) / in (rsp) pointer registers take the
 * full low 32 bits of the queue base instead of just the index field.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* queue base must be 1KB aligned, per the mask used below */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response queue base must be 256-byte aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
794
/**
 *      mv_start_dma - Enable eDMA engine
 *      @ap: ATA channel to manipulate
 *      @port_mmio: port register base address
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command about to be issued
 *
 *      If EDMA is already running in the wrong queuing mode (NCQ vs
 *      non-NCQ) it is stopped first.  If EDMA is off, pending EDMA
 *      event/interrupt indicators are cleared, the engine is
 *      reconfigured for the requested mode, queue pointers are
 *      re-programmed, and EDMA is enabled.  The final WARN_ON verifies
 *      the local cache of the eDMA state against the hardware.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* a mode switch requires a full stop/restart of the engine */
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			/* ack only this port's bits; leave siblings alone */
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
847
/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      Requests EDMA disable and polls until the hardware confirms the
 *      engine is off (up to 10000 * 10us ≈ 100ms).
 *
 *      Returns 0 on success, -EIO if the engine never reports disabled.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
871
/*
 * mv_stop_edma - stop the EDMA engine on @ap if software believes it is on.
 *
 * Clears the cached MV_PP_FLAG_EDMA_EN before asking the hardware to stop,
 * so the flag never claims "enabled" for an engine we tried to shut down.
 * Returns 0 if already stopped or stopped cleanly, -EIO on timeout.
 */
static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}
886
Jeff Garzik8a70f8d2005-10-05 17:19:47 -0400887#ifdef ATA_DEBUG
Brett Russ31961942005-09-30 01:36:00 -0400888static void mv_dump_mem(void __iomem *start, unsigned bytes)
889{
Brett Russ31961942005-09-30 01:36:00 -0400890 int b, w;
891 for (b = 0; b < bytes; ) {
892 DPRINTK("%p: ", start + b);
893 for (w = 0; b < bytes && w < 4; w++) {
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400894 printk("%08x ", readl(start + b));
Brett Russ31961942005-09-30 01:36:00 -0400895 b += sizeof(u32);
896 }
897 printk("\n");
898 }
Brett Russ31961942005-09-30 01:36:00 -0400899}
Jeff Garzik8a70f8d2005-10-05 17:19:47 -0400900#endif
901
/*
 * mv_dump_pci_cfg - hex-dump the first @bytes of @pdev's PCI config space.
 * Compiled to a no-op body unless ATA_DEBUG is defined.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			/* read errors are ignored; this is debug output only */
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

/*
 * mv_dump_all_regs - debug dump of PCI config, chip-global, HC and port
 * register blocks.
 * @mmio_base: chip MMIO base
 * @port: port to dump, or negative to dump all ports/HCs
 * @pdev: PCI device for config-space dump, or NULL to skip it
 *
 * Compiled to a no-op body unless ATA_DEBUG is defined.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* negative port means "dump everything" */
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
962
Brett Russ20f733e2005-09-01 18:26:17 -0400963static unsigned int mv_scr_offset(unsigned int sc_reg_in)
964{
965 unsigned int ofs;
966
967 switch (sc_reg_in) {
968 case SCR_STATUS:
969 case SCR_CONTROL:
970 case SCR_ERROR:
971 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
972 break;
973 case SCR_ACTIVE:
974 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
975 break;
976 default:
977 ofs = 0xffffffffU;
978 break;
979 }
980 return ofs;
981}
982
Tejun Heoda3dbb12007-07-16 14:29:40 +0900983static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -0400984{
985 unsigned int ofs = mv_scr_offset(sc_reg_in);
986
Tejun Heoda3dbb12007-07-16 14:29:40 +0900987 if (ofs != 0xffffffffU) {
988 *val = readl(mv_ap_base(ap) + ofs);
989 return 0;
990 } else
991 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400992}
993
Tejun Heoda3dbb12007-07-16 14:29:40 +0900994static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -0400995{
996 unsigned int ofs = mv_scr_offset(sc_reg_in);
997
Tejun Heoda3dbb12007-07-16 14:29:40 +0900998 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -0400999 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001000 return 0;
1001 } else
1002 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001003}
1004
Mark Lordf2738272008-01-26 18:32:29 -05001005static void mv6_dev_config(struct ata_device *adev)
1006{
1007 /*
1008 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1009 * See mv_qc_prep() for more info.
1010 */
1011 if (adev->flags & ATA_DFLAG_NCQ)
1012 if (adev->max_sectors > ATA_MAX_SECTORS)
1013 adev->max_sectors = ATA_MAX_SECTORS;
1014}
1015
/*
 * mv_edma_cfg - program the EDMA configuration register for @ap.
 * @ap: port to configure
 * @want_ncq: nonzero to enable NCQ queuing mode
 *
 * Builds the EDMA_CFG value from the chip generation (Gen-I/II/IIE each
 * need different burst/queue-cache bits) plus the NCQ enable, updates the
 * cached MV_PP_FLAG_NCQ_EN accordingly, and writes the register.
 */
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1047
/*
 * mv_port_free_dma_mem - release the port's DMA pool allocations.
 *
 * Frees the CRQB/CRPB queues and the per-tag scatter/gather tables,
 * NULLing each pointer so this is safe to call on a partially
 * initialized port (the mv_port_start() error path does exactly that).
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 * On GEN_I, tags 1..N alias tag 0, so only tag 0 is actually
	 * freed; the aliases are just NULLed.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
1076
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.  On any allocation failure after the first, all
 *      previously acquired DMA pool memory is released via
 *      mv_port_free_dma_mem().
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	/* devm-managed: freed automatically when the device goes away */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* GEN_I: all tags alias the tag-0 table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1144
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	/* quiesce the engine before releasing its queue memory */
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
1159
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.  Each DMA-mapped
 *      segment is split so that no ePRD entry crosses a 64KB boundary
 *      (the hardware length/offset fields are 16 bits wide).
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* each NCQ tag has its own ePRD table */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clip the chunk at the next 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* flag the final entry so the hardware knows where the table ends */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1203
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001204static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001205{
Mark Lord559eeda2006-05-19 16:40:15 -04001206 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001207 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001208 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001209}
1210
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ protocols go through the EDMA request queue */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point the CRQB at this tag's scatter/gather table */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1300
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.  Unlike mv_qc_prep(), the Gen-IIE CRQB
 *      format carries the whole taskfile in four packed 32-bit words,
 *      so no registers need to be dropped.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* only DMA and NCQ protocols go through the EDMA request queue */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	/* Gen-IIE also carries the tag in the host-queue field */
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1368
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it starts EDMA for the proper
 *      protocol, bumps the software request producer index, and
 *      writes it to hardware to kick off the command.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		return ata_sff_qc_issue(qc);
	}

	/* make sure EDMA is running in the right mode for this protocol */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* advance software request-queue producer index */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1411
Brett Russ05b308e2005-10-05 17:08:53 -04001412/**
Brett Russ05b308e2005-10-05 17:08:53 -04001413 * mv_err_intr - Handle error interrupts on the port
1414 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001415 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001416 *
1417 * In most cases, just clear the interrupt and move on. However,
Mark Lorde12bef52008-03-31 19:33:56 -04001418 * some cases require an eDMA reset, which also performs a COMRESET.
1419 * The SERR case requires a clear of pending errors in the SATA
1420 * SERROR register. Finally, if the port disabled DMA,
1421 * update our cached copy to match.
Brett Russ05b308e2005-10-05 17:08:53 -04001422 *
1423 * LOCKING:
1424 * Inherited from caller.
1425 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001426static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001427{
Brett Russ31961942005-09-30 01:36:00 -04001428 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001429 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1430 struct mv_port_priv *pp = ap->private_data;
1431 struct mv_host_priv *hpriv = ap->host->private_data;
1432 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1433 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001434 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001435
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001436 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001437
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001438 if (!edma_enabled) {
1439 /* just a guess: do we need to do this? should we
1440 * expand this, and do it in all cases?
1441 */
Tejun Heo936fd732007-08-06 18:36:23 +09001442 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1443 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001444 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001445
1446 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1447
1448 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1449
1450 /*
1451 * all generations share these EDMA error cause bits
1452 */
1453
1454 if (edma_err_cause & EDMA_ERR_DEV)
1455 err_mask |= AC_ERR_DEV;
1456 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001457 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001458 EDMA_ERR_INTRL_PAR)) {
1459 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001460 action |= ATA_EH_RESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001461 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001462 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001463 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1464 ata_ehi_hotplugged(ehi);
1465 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001466 "dev disconnect" : "dev connect");
Tejun Heocf480622008-01-24 00:05:14 +09001467 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001468 }
1469
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001470 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001471 eh_freeze_mask = EDMA_EH_FREEZE_5;
1472
1473 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001474 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001475 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001476 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001477 }
1478 } else {
1479 eh_freeze_mask = EDMA_EH_FREEZE;
1480
1481 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001482 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001483 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001484 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001485 }
1486
1487 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001488 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1489 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001490 err_mask = AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001491 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001492 }
1493 }
Brett Russ20f733e2005-09-01 18:26:17 -04001494
1495 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001496 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001497
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001498 if (!err_mask) {
1499 err_mask = AC_ERR_OTHER;
Tejun Heocf480622008-01-24 00:05:14 +09001500 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001501 }
1502
1503 ehi->serror |= serr;
1504 ehi->action |= action;
1505
1506 if (qc)
1507 qc->err_mask |= err_mask;
1508 else
1509 ehi->err_mask |= err_mask;
1510
1511 if (edma_err_cause & eh_freeze_mask)
1512 ata_port_freeze(ap);
1513 else
1514 ata_port_abort(ap);
1515}
1516
1517static void mv_intr_pio(struct ata_port *ap)
1518{
1519 struct ata_queued_cmd *qc;
1520 u8 ata_status;
1521
1522 /* ignore spurious intr if drive still BUSY */
1523 ata_status = readb(ap->ioaddr.status_addr);
1524 if (unlikely(ata_status & ATA_BUSY))
1525 return;
1526
1527 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001528 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001529 if (unlikely(!qc)) /* no active tag */
1530 return;
1531 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1532 return;
1533
1534 /* and finally, complete the ATA command */
1535 qc->err_mask |= ac_err_mask(ata_status);
1536 ata_qc_complete(qc);
1537}
1538
/* Drain the hardware EDMA response queue, completing each finished
 * command; on a per-response error, hand off to mv_err_intr().  The
 * out-pointer is written back to hardware only once, after the loop.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* error path: out-pointer deliberately NOT advanced */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1604
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	/* HC 0 serves ports 0..MV_PORTS_PER_HC-1, HC 1 the next group */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* ack (write-clear) the cause bits we just read */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;			/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			/* polled commands are handled by their issuer */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		/* dispatch completion handling by current EDMA state */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1687
/* Handle a PCI error interrupt: log and dump diagnostics, write-clear
 * the cause register, then freeze every online port with ATA_EH_RESET
 * so libata EH recovers them.  Called under host->lock from
 * mv_interrupt().
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* clear the PCI error cause bits */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* record the cause text only on the first port */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1727
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* PCI errors take priority over per-HC handling */
	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each host controller with pending bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1783
Jeff Garzikc9d39132005-11-13 17:47:51 -05001784static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1785{
1786 unsigned int ofs;
1787
1788 switch (sc_reg_in) {
1789 case SCR_STATUS:
1790 case SCR_ERROR:
1791 case SCR_CONTROL:
1792 ofs = sc_reg_in * sizeof(u32);
1793 break;
1794 default:
1795 ofs = 0xffffffffU;
1796 break;
1797 }
1798 return ofs;
1799}
1800
Tejun Heoda3dbb12007-07-16 14:29:40 +09001801static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001802{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001803 struct mv_host_priv *hpriv = ap->host->private_data;
1804 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001805 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001806 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1807
Tejun Heoda3dbb12007-07-16 14:29:40 +09001808 if (ofs != 0xffffffffU) {
1809 *val = readl(addr + ofs);
1810 return 0;
1811 } else
1812 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001813}
1814
Tejun Heoda3dbb12007-07-16 14:29:40 +09001815static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001816{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001817 struct mv_host_priv *hpriv = ap->host->private_data;
1818 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001819 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001820 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1821
Tejun Heoda3dbb12007-07-16 14:29:40 +09001822 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001823 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001824 return 0;
1825 } else
1826 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001827}
1828
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001829static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik522479f2005-11-12 22:14:02 -05001830{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001831 struct pci_dev *pdev = to_pci_dev(host->dev);
Jeff Garzik522479f2005-11-12 22:14:02 -05001832 int early_5080;
1833
Auke Kok44c10132007-06-08 15:46:36 -07001834 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001835
1836 if (!early_5080) {
1837 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1838 tmp |= (1 << 0);
1839 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1840 }
1841
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001842 mv_reset_pci_bus(host, mmio);
Jeff Garzik522479f2005-11-12 22:14:02 -05001843}
1844
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	/* Magic flash-control value -- presumably from the vendor driver;
	 * confirm against the datasheet.  @hpriv is unused here but kept
	 * for signature symmetry with mv6_reset_flash().
	 */
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1849
Jeff Garzik47c2b672005-11-12 21:13:17 -05001850static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001851 void __iomem *mmio)
1852{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001853 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1854 u32 tmp;
1855
1856 tmp = readl(phy_mmio + MV5_PHY_MODE);
1857
1858 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1859 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001860}
1861
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0;
	 * the sibling mv5_reset_bus() does "|= (1 << 0)".  This looks
	 * suspicious -- confirm intent against the vendor driver before
	 * changing it.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1874
/* Apply 50xx PHY errata fixups for @port, then restore the cached
 * pre-emphasis/amplitude values saved by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* mask covers the pre (12:11) and amps (7:5) fields plus bit 6 */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 errata: magic LT-mode / PHY-control tweaks --
		 * values taken as-is; confirm against errata doc.
		 */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* clear the signal fields, then merge the cached settings back in */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1900
Jeff Garzikc9d39132005-11-13 17:47:51 -05001901
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset one 50xx port: stop EDMA, reset the channel, then zero/init
 * the per-port EDMA registers to a known state.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1931
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host-controller block: zero four HC registers, then
 * rewrite register 0x20 with fixed mask/set values.
 * NOTE(review): the 0x1c1c1c1c/0x03030303 constants are per-byte
 * fields of unknown meaning here -- confirm against the datasheet.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1950
1951static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1952 unsigned int n_hc)
1953{
1954 unsigned int hc, port;
1955
1956 for (hc = 0; hc < n_hc; hc++) {
1957 for (port = 0; port < MV_PORTS_PER_HC; port++)
1958 mv5_reset_hc_port(hpriv, mmio,
1959 (hc * MV_PORTS_PER_HC) + port);
1960
1961 mv5_reset_one_hc(hpriv, mmio, hc);
1962 }
1963
1964 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001965}
1966
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Reset the PCI-facing registers: narrow MV_PCI_MODE, then zero the
 * timers, masks and error-latch registers so no stale state survives.
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	/* clear bits 23:16 of MV_PCI_MODE, preserving the rest */
	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1991
1992static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1993{
1994 u32 tmp;
1995
1996 mv5_reset_flash(hpriv, mmio);
1997
1998 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1999 tmp &= 0x3;
2000 tmp |= (1 << 5) | (1 << 6);
2001 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2002}
2003
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @hpriv: host private structure (unused here)
 *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers (unused here)
 *
 *      This routine only applies to 6xxx parts.  Returns 0 on success,
 *      1 if any step of the reset sequence times out.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* poll up to ~1000us for the PCI master to go idle */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset (retry up to 5 times until the bit reads back set) */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2067
Jeff Garzik47c2b672005-11-12 21:13:17 -05002068static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002069 void __iomem *mmio)
2070{
2071 void __iomem *port_mmio;
2072 u32 tmp;
2073
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002074 tmp = readl(mmio + MV_RESET_CFG);
2075 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002076 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002077 hpriv->signal[idx].pre = 0x1 << 5;
2078 return;
2079 }
2080
2081 port_mmio = mv_port_base(mmio, idx);
2082 tmp = readl(port_mmio + PHY_MODE2);
2083
2084 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2085 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2086}
2087
Jeff Garzik47c2b672005-11-12 21:13:17 -05002088static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002089{
Jeff Garzik47c2b672005-11-12 21:13:17 -05002090 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002091}
2092
Jeff Garzikc9d39132005-11-13 17:47:51 -05002093static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002094 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002095{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002096 void __iomem *port_mmio = mv_port_base(mmio, port);
2097
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002098 u32 hp_flags = hpriv->hp_flags;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002099 int fix_phy_mode2 =
2100 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002101 int fix_phy_mode4 =
Jeff Garzik47c2b672005-11-12 21:13:17 -05002102 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2103 u32 m2, tmp;
2104
2105 if (fix_phy_mode2) {
2106 m2 = readl(port_mmio + PHY_MODE2);
2107 m2 &= ~(1 << 16);
2108 m2 |= (1 << 31);
2109 writel(m2, port_mmio + PHY_MODE2);
2110
2111 udelay(200);
2112
2113 m2 = readl(port_mmio + PHY_MODE2);
2114 m2 &= ~((1 << 16) | (1 << 31));
2115 writel(m2, port_mmio + PHY_MODE2);
2116
2117 udelay(200);
2118 }
2119
2120 /* who knows what this magic does */
2121 tmp = readl(port_mmio + PHY_MODE3);
2122 tmp &= ~0x7F800000;
2123 tmp |= 0x2A800000;
2124 writel(tmp, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002125
2126 if (fix_phy_mode4) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002127 u32 m4;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002128
2129 m4 = readl(port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002130
2131 if (hp_flags & MV_HP_ERRATA_60X1B2)
Mark Lorde12bef52008-03-31 19:33:56 -04002132 tmp = readl(port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002133
Mark Lorde12bef52008-03-31 19:33:56 -04002134 /* workaround for errata FEr SATA#10 (part 1) */
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002135 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2136
2137 writel(m4, port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002138
2139 if (hp_flags & MV_HP_ERRATA_60X1B2)
Mark Lorde12bef52008-03-31 19:33:56 -04002140 writel(tmp, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002141 }
2142
2143 /* Revert values of pre-emphasis and signal amps to the saved ones */
2144 m2 = readl(port_mmio + PHY_MODE2);
2145
2146 m2 &= ~MV_M2_PREAMP_MASK;
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002147 m2 |= hpriv->signal[port].amps;
2148 m2 |= hpriv->signal[port].pre;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002149 m2 &= ~(1 << 16);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002150
Jeff Garzike4e7b892006-01-31 12:18:41 -05002151 /* according to mvSata 3.6.1, some IIE values are fixed */
2152 if (IS_GEN_IIE(hpriv)) {
2153 m2 &= ~0xC30FF01F;
2154 m2 |= 0x0000900F;
2155 }
2156
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002157 writel(m2, port_mmio + PHY_MODE2);
2158}
2159
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002160/* TODO: use the generic LED interface to configure the SATA Presence */
2161/* & Acitivy LEDs on the board */
2162static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2163 void __iomem *mmio)
2164{
2165 return;
2166}
2167
2168static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2169 void __iomem *mmio)
2170{
2171 void __iomem *port_mmio;
2172 u32 tmp;
2173
2174 port_mmio = mv_port_base(mmio, idx);
2175 tmp = readl(port_mmio + PHY_MODE2);
2176
2177 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2178 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2179}
2180
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * Reset one SoC port: stop EDMA and strobe ATA_RST via mv_reset_channel(),
 * then zero out the port's EDMA queue/IRQ registers and restore default
 * EDMA config and IORDY timeout values.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
2209
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Zero the interrupt-related registers of the SoC's (single) host ctrl */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}
2223
2224#undef ZERO
2225
2226static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2227 void __iomem *mmio, unsigned int n_hc)
2228{
2229 unsigned int port;
2230
2231 for (port = 0; port < hpriv->n_ports; port++)
2232 mv_soc_reset_hc_port(hpriv, mmio, port);
2233
2234 mv_soc_reset_one_hc(hpriv, mmio);
2235
2236 return 0;
2237}
2238
2239static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2240 void __iomem *mmio)
2241{
2242 return;
2243}
2244
2245static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2246{
2247 return;
2248}
2249
Mark Lordb67a1062008-03-31 19:35:13 -04002250static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2251{
2252 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2253
2254 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
2255 if (want_gen2i)
2256 ifctl |= (1 << 7); /* enable gen2i speed */
2257 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2258}
2259
Mark Lordb5624682008-03-31 19:34:40 -04002260/*
2261 * Caller must ensure that EDMA is not active,
2262 * by first doing mv_stop_edma() where needed.
2263 */
Mark Lorde12bef52008-03-31 19:33:56 -04002264static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -05002265 unsigned int port_no)
Brett Russ20f733e2005-09-01 18:26:17 -04002266{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002267 void __iomem *port_mmio = mv_port_base(mmio, port_no);
Brett Russ20f733e2005-09-01 18:26:17 -04002268
Mark Lord0d8be5c2008-04-16 14:56:12 -04002269 mv_stop_edma_engine(port_mmio);
Brett Russ31961942005-09-30 01:36:00 -04002270 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002271
Mark Lordb67a1062008-03-31 19:35:13 -04002272 if (!IS_GEN_I(hpriv)) {
2273 /* Enable 3.0gb/s link speed */
2274 mv_setup_ifctl(port_mmio, 1);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002275 }
Mark Lordb67a1062008-03-31 19:35:13 -04002276 /*
2277 * Strobing ATA_RST here causes a hard reset of the SATA transport,
2278 * link, and physical layers. It resets all SATA interface registers
2279 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
Brett Russ20f733e2005-09-01 18:26:17 -04002280 */
Mark Lordb67a1062008-03-31 19:35:13 -04002281 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2282 udelay(25); /* allow reset propagation */
Brett Russ31961942005-09-30 01:36:00 -04002283 writelfl(0, port_mmio + EDMA_CMD_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002284
Jeff Garzikc9d39132005-11-13 17:47:51 -05002285 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2286
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002287 if (IS_GEN_I(hpriv))
Jeff Garzikc9d39132005-11-13 17:47:51 -05002288 mdelay(1);
2289}
2290
Tejun Heocc0680a2007-08-06 18:36:23 +09002291static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002292 unsigned long deadline)
2293{
Tejun Heocc0680a2007-08-06 18:36:23 +09002294 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002295 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordb5624682008-03-31 19:34:40 -04002296 struct mv_port_priv *pp = ap->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002297 void __iomem *mmio = hpriv->base;
Mark Lord0d8be5c2008-04-16 14:56:12 -04002298 int rc, attempts = 0, extra = 0;
2299 u32 sstatus;
2300 bool online;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002301
Mark Lorde12bef52008-03-31 19:33:56 -04002302 mv_reset_channel(hpriv, mmio, ap->port_no);
Mark Lordb5624682008-03-31 19:34:40 -04002303 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002304
Mark Lord0d8be5c2008-04-16 14:56:12 -04002305 /* Workaround for errata FEr SATA#10 (part 2) */
2306 do {
Mark Lord17c5aab2008-04-16 14:56:51 -04002307 const unsigned long *timing =
2308 sata_ehc_deb_timing(&link->eh_context);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002309
Mark Lord17c5aab2008-04-16 14:56:51 -04002310 rc = sata_link_hardreset(link, timing, deadline + extra,
2311 &online, NULL);
2312 if (rc)
Mark Lord0d8be5c2008-04-16 14:56:12 -04002313 return rc;
Mark Lord0d8be5c2008-04-16 14:56:12 -04002314 sata_scr_read(link, SCR_STATUS, &sstatus);
2315 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2316 /* Force 1.5gb/s link speed and try again */
2317 mv_setup_ifctl(mv_ap_base(ap), 0);
2318 if (time_after(jiffies + HZ, deadline))
2319 extra = HZ; /* only extend it once, max */
2320 }
2321 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002322
Mark Lord17c5aab2008-04-16 14:56:51 -04002323 return rc;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002324}
2325
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002326static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002327{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002328 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002329 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2330 u32 tmp, mask;
2331 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002332
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002333 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002334
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002335 shift = ap->port_no * 2;
2336 if (hc > 0)
2337 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002338
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002339 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002340
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002341 /* disable assertion of portN err, done events */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002342 tmp = readl(hpriv->main_mask_reg_addr);
2343 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002344}
2345
/*
 * libata ->thaw handler: clear any latched EDMA errors and pending
 * CRPB-done/device interrupts for this port, then re-enable its
 * err/done bits in the chip's main interrupt mask register.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* ports 4-7 sit on the second HC: bit pair is one higher, and the
	 * per-HC port number is 0-3
	 */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);		/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8));	/* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2379
Brett Russ05b308e2005-10-05 17:08:53 -04002380/**
2381 * mv_port_init - Perform some early initialization on a single port.
2382 * @port: libata data structure storing shadow register addresses
2383 * @port_mmio: base address of the port
2384 *
2385 * Initialize shadow register mmio addresses, clear outstanding
2386 * interrupts on the port, and unmask interrupts for the future
2387 * start of the port.
2388 *
2389 * LOCKING:
2390 * Inherited from caller.
2391 */
Brett Russ31961942005-09-30 01:36:00 -04002392static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2393{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002394 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04002395 unsigned serr_ofs;
2396
Jeff Garzik8b260242005-11-12 12:32:50 -05002397 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04002398 */
2399 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05002400 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04002401 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2402 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2403 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2404 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2405 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2406 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05002407 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04002408 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2409 /* special case: control/altstatus doesn't have ATA_REG_ address */
2410 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2411
2412 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08002413 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04002414
Brett Russ31961942005-09-30 01:36:00 -04002415 /* Clear any currently outstanding port interrupt conditions */
2416 serr_ofs = mv_scr_offset(SCR_ERROR);
2417 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2418 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2419
Mark Lord646a4da2008-01-26 18:30:37 -05002420 /* unmask all non-transient EDMA error interrupts */
2421 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002422
Jeff Garzik8b260242005-11-12 12:32:50 -05002423 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04002424 readl(port_mmio + EDMA_CFG_OFS),
2425 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2426 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e2005-09-01 18:26:17 -04002427}
2428
Tejun Heo4447d352007-04-17 23:44:08 +09002429static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002430{
Tejun Heo4447d352007-04-17 23:44:08 +09002431 struct pci_dev *pdev = to_pci_dev(host->dev);
2432 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002433 u32 hp_flags = hpriv->hp_flags;
2434
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002435 switch (board_idx) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002436 case chip_5080:
2437 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002438 hp_flags |= MV_HP_GEN_I;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002439
Auke Kok44c10132007-06-08 15:46:36 -07002440 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002441 case 0x1:
2442 hp_flags |= MV_HP_ERRATA_50XXB0;
2443 break;
2444 case 0x3:
2445 hp_flags |= MV_HP_ERRATA_50XXB2;
2446 break;
2447 default:
2448 dev_printk(KERN_WARNING, &pdev->dev,
2449 "Applying 50XXB2 workarounds to unknown rev\n");
2450 hp_flags |= MV_HP_ERRATA_50XXB2;
2451 break;
2452 }
2453 break;
2454
2455 case chip_504x:
2456 case chip_508x:
2457 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002458 hp_flags |= MV_HP_GEN_I;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002459
Auke Kok44c10132007-06-08 15:46:36 -07002460 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002461 case 0x0:
2462 hp_flags |= MV_HP_ERRATA_50XXB0;
2463 break;
2464 case 0x3:
2465 hp_flags |= MV_HP_ERRATA_50XXB2;
2466 break;
2467 default:
2468 dev_printk(KERN_WARNING, &pdev->dev,
2469 "Applying B2 workarounds to unknown rev\n");
2470 hp_flags |= MV_HP_ERRATA_50XXB2;
2471 break;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002472 }
2473 break;
2474
2475 case chip_604x:
2476 case chip_608x:
Jeff Garzik47c2b672005-11-12 21:13:17 -05002477 hpriv->ops = &mv6xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002478 hp_flags |= MV_HP_GEN_II;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002479
Auke Kok44c10132007-06-08 15:46:36 -07002480 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002481 case 0x7:
2482 hp_flags |= MV_HP_ERRATA_60X1B2;
2483 break;
2484 case 0x9:
2485 hp_flags |= MV_HP_ERRATA_60X1C0;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002486 break;
2487 default:
2488 dev_printk(KERN_WARNING, &pdev->dev,
Jeff Garzik47c2b672005-11-12 21:13:17 -05002489 "Applying B2 workarounds to unknown rev\n");
2490 hp_flags |= MV_HP_ERRATA_60X1B2;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002491 break;
2492 }
2493 break;
2494
Jeff Garzike4e7b892006-01-31 12:18:41 -05002495 case chip_7042:
Mark Lord02a121d2007-12-01 13:07:22 -05002496 hp_flags |= MV_HP_PCIE;
Mark Lord306b30f2007-12-04 14:07:52 -05002497 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2498 (pdev->device == 0x2300 || pdev->device == 0x2310))
2499 {
Mark Lord4e520032007-12-11 12:58:05 -05002500 /*
2501 * Highpoint RocketRAID PCIe 23xx series cards:
2502 *
2503 * Unconfigured drives are treated as "Legacy"
2504 * by the BIOS, and it overwrites sector 8 with
2505 * a "Lgcy" metadata block prior to Linux boot.
2506 *
2507 * Configured drives (RAID or JBOD) leave sector 8
2508 * alone, but instead overwrite a high numbered
2509 * sector for the RAID metadata. This sector can
2510 * be determined exactly, by truncating the physical
2511 * drive capacity to a nice even GB value.
2512 *
2513 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2514 *
2515 * Warn the user, lest they think we're just buggy.
2516 */
2517 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2518 " BIOS CORRUPTS DATA on all attached drives,"
2519 " regardless of if/how they are configured."
2520 " BEWARE!\n");
2521 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2522 " use sectors 8-9 on \"Legacy\" drives,"
2523 " and avoid the final two gigabytes on"
2524 " all RocketRAID BIOS initialized drives.\n");
Mark Lord306b30f2007-12-04 14:07:52 -05002525 }
Jeff Garzike4e7b892006-01-31 12:18:41 -05002526 case chip_6042:
2527 hpriv->ops = &mv6xxx_ops;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002528 hp_flags |= MV_HP_GEN_IIE;
2529
Auke Kok44c10132007-06-08 15:46:36 -07002530 switch (pdev->revision) {
Jeff Garzike4e7b892006-01-31 12:18:41 -05002531 case 0x0:
2532 hp_flags |= MV_HP_ERRATA_XX42A0;
2533 break;
2534 case 0x1:
2535 hp_flags |= MV_HP_ERRATA_60X1C0;
2536 break;
2537 default:
2538 dev_printk(KERN_WARNING, &pdev->dev,
2539 "Applying 60X1C0 workarounds to unknown rev\n");
2540 hp_flags |= MV_HP_ERRATA_60X1C0;
2541 break;
2542 }
2543 break;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002544 case chip_soc:
2545 hpriv->ops = &mv_soc_ops;
2546 hp_flags |= MV_HP_ERRATA_60X1C0;
2547 break;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002548
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002549 default:
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002550 dev_printk(KERN_ERR, host->dev,
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002551 "BUG: invalid board index %u\n", board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002552 return 1;
2553 }
2554
2555 hpriv->hp_flags = hp_flags;
Mark Lord02a121d2007-12-01 13:07:22 -05002556 if (hp_flags & MV_HP_PCIE) {
2557 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2558 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2559 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2560 } else {
2561 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2562 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2563 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2564 }
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002565
2566 return 0;
2567}
2568
Brett Russ05b308e2005-10-05 17:08:53 -04002569/**
Jeff Garzik47c2b672005-11-12 21:13:17 -05002570 * mv_init_host - Perform some early initialization of the host.
Tejun Heo4447d352007-04-17 23:44:08 +09002571 * @host: ATA host to initialize
2572 * @board_idx: controller index
Brett Russ05b308e2005-10-05 17:08:53 -04002573 *
2574 * If possible, do an early global reset of the host. Then do
2575 * our port init and clear/unmask all/relevant host interrupts.
2576 *
2577 * LOCKING:
2578 * Inherited from caller.
2579 */
Tejun Heo4447d352007-04-17 23:44:08 +09002580static int mv_init_host(struct ata_host *host, unsigned int board_idx)
Brett Russ20f733e2005-09-01 18:26:17 -04002581{
2582 int rc = 0, n_hc, port, hc;
Tejun Heo4447d352007-04-17 23:44:08 +09002583 struct mv_host_priv *hpriv = host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002584 void __iomem *mmio = hpriv->base;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002585
Tejun Heo4447d352007-04-17 23:44:08 +09002586 rc = mv_chip_id(host, board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002587 if (rc)
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002588 goto done;
2589
2590 if (HAS_PCI(host)) {
2591 hpriv->main_cause_reg_addr = hpriv->base +
2592 HC_MAIN_IRQ_CAUSE_OFS;
2593 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2594 } else {
2595 hpriv->main_cause_reg_addr = hpriv->base +
2596 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2597 hpriv->main_mask_reg_addr = hpriv->base +
2598 HC_SOC_MAIN_IRQ_MASK_OFS;
2599 }
2600 /* global interrupt mask */
2601 writel(0, hpriv->main_mask_reg_addr);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002602
Tejun Heo4447d352007-04-17 23:44:08 +09002603 n_hc = mv_get_hc_count(host->ports[0]->flags);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002604
Tejun Heo4447d352007-04-17 23:44:08 +09002605 for (port = 0; port < host->n_ports; port++)
Jeff Garzik47c2b672005-11-12 21:13:17 -05002606 hpriv->ops->read_preamp(hpriv, port, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04002607
Jeff Garzikc9d39132005-11-13 17:47:51 -05002608 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002609 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04002610 goto done;
Brett Russ20f733e2005-09-01 18:26:17 -04002611
Jeff Garzik522479f2005-11-12 22:14:02 -05002612 hpriv->ops->reset_flash(hpriv, mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002613 hpriv->ops->reset_bus(host, mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002614 hpriv->ops->enable_leds(hpriv, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04002615
Tejun Heo4447d352007-04-17 23:44:08 +09002616 for (port = 0; port < host->n_ports; port++) {
Tejun Heocbcdd872007-08-18 13:14:55 +09002617 struct ata_port *ap = host->ports[port];
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002618 void __iomem *port_mmio = mv_port_base(mmio, port);
Tejun Heocbcdd872007-08-18 13:14:55 +09002619
2620 mv_port_init(&ap->ioaddr, port_mmio);
2621
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002622#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002623 if (HAS_PCI(host)) {
2624 unsigned int offset = port_mmio - mmio;
2625 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2626 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2627 }
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002628#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002629 }
2630
2631 for (hc = 0; hc < n_hc; hc++) {
Brett Russ31961942005-09-30 01:36:00 -04002632 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2633
2634 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2635 "(before clear)=0x%08x\n", hc,
2636 readl(hc_mmio + HC_CFG_OFS),
2637 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2638
2639 /* Clear any currently outstanding hc interrupt conditions */
2640 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002641 }
2642
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002643 if (HAS_PCI(host)) {
2644 /* Clear any currently outstanding host interrupt conditions */
2645 writelfl(0, mmio + hpriv->irq_cause_ofs);
Brett Russ31961942005-09-30 01:36:00 -04002646
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002647 /* and unmask interrupt generation for host regs */
2648 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2649 if (IS_GEN_I(hpriv))
2650 writelfl(~HC_MAIN_MASKED_IRQS_5,
2651 hpriv->main_mask_reg_addr);
2652 else
2653 writelfl(~HC_MAIN_MASKED_IRQS,
2654 hpriv->main_mask_reg_addr);
Jeff Garzikfb621e22007-02-25 04:19:45 -05002655
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002656 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2657 "PCI int cause/mask=0x%08x/0x%08x\n",
2658 readl(hpriv->main_cause_reg_addr),
2659 readl(hpriv->main_mask_reg_addr),
2660 readl(mmio + hpriv->irq_cause_ofs),
2661 readl(mmio + hpriv->irq_mask_ofs));
2662 } else {
2663 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2664 hpriv->main_mask_reg_addr);
2665 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2666 readl(hpriv->main_cause_reg_addr),
2667 readl(hpriv->main_mask_reg_addr));
2668 }
Brett Russ31961942005-09-30 01:36:00 -04002669done:
Brett Russ20f733e2005-09-01 18:26:17 -04002670 return rc;
2671}
2672
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002673static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2674{
2675 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2676 MV_CRQB_Q_SZ, 0);
2677 if (!hpriv->crqb_pool)
2678 return -ENOMEM;
2679
2680 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2681 MV_CRPB_Q_SZ, 0);
2682 if (!hpriv->crpb_pool)
2683 return -ENOMEM;
2684
2685 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2686 MV_SG_TBL_SZ, 0);
2687 if (!hpriv->sg_tbl_pool)
2688 return -ENOMEM;
2689
2690 return 0;
2691}
2692
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002693/**
2694 * mv_platform_probe - handle a positive probe of an soc Marvell
2695 * host
2696 * @pdev: platform device found
2697 *
2698 * LOCKING:
2699 * Inherited from caller.
2700 */
2701static int mv_platform_probe(struct platform_device *pdev)
2702{
2703 static int printed_version;
2704 const struct mv_sata_platform_data *mv_platform_data;
2705 const struct ata_port_info *ppi[] =
2706 { &mv_port_info[chip_soc], NULL };
2707 struct ata_host *host;
2708 struct mv_host_priv *hpriv;
2709 struct resource *res;
2710 int n_ports, rc;
2711
2712 if (!printed_version++)
2713 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2714
2715 /*
2716 * Simple resource validation ..
2717 */
2718 if (unlikely(pdev->num_resources != 2)) {
2719 dev_err(&pdev->dev, "invalid number of resources\n");
2720 return -EINVAL;
2721 }
2722
2723 /*
2724 * Get the register base first
2725 */
2726 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2727 if (res == NULL)
2728 return -EINVAL;
2729
2730 /* allocate host */
2731 mv_platform_data = pdev->dev.platform_data;
2732 n_ports = mv_platform_data->n_ports;
2733
2734 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2735 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2736
2737 if (!host || !hpriv)
2738 return -ENOMEM;
2739 host->private_data = hpriv;
2740 hpriv->n_ports = n_ports;
2741
2742 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002743 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2744 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002745 hpriv->base -= MV_SATAHC0_REG_BASE;
2746
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002747 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2748 if (rc)
2749 return rc;
2750
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002751 /* initialize adapter */
2752 rc = mv_init_host(host, chip_soc);
2753 if (rc)
2754 return rc;
2755
2756 dev_printk(KERN_INFO, &pdev->dev,
2757 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2758 host->n_ports);
2759
2760 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2761 IRQF_SHARED, &mv6_sht);
2762}
2763
2764/*
2765 *
2766 * mv_platform_remove - unplug a platform interface
2767 * @pdev: platform device
2768 *
2769 * A platform bus SATA device has been unplugged. Perform the needed
2770 * cleanup. Also called on module unload for any active devices.
2771 */
2772static int __devexit mv_platform_remove(struct platform_device *pdev)
2773{
2774 struct device *dev = &pdev->dev;
2775 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002776
2777 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002778 return 0;
2779}
2780
/* platform bus glue for SoC-integrated (non-PCI) Marvell SATA hosts */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
2789
2790
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002791#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002792static int mv_pci_init_one(struct pci_dev *pdev,
2793 const struct pci_device_id *ent);
2794
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002795
2796static struct pci_driver mv_pci_driver = {
2797 .name = DRV_NAME,
2798 .id_table = mv_pci_tbl,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002799 .probe = mv_pci_init_one,
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002800 .remove = ata_pci_remove_one,
2801};
2802
2803/*
2804 * module options
2805 */
2806static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2807
2808
/* move to PCI layer or libata core? */
/*
 * Configure DMA masks: prefer 64-bit streaming DMA, falling back to a
 * 32-bit consistent mask if the 64-bit consistent mask fails; otherwise
 * use 32-bit masks for both.  Returns 0 on success, negative errno on
 * failure (with an error message already printed).
 */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			/* 64-bit streaming DMA works but 64-bit consistent
			 * doesn't: retry consistent allocations at 32 bits
			 */
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
2841
Brett Russ05b308e2005-10-05 17:08:53 -04002842/**
2843 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002844 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002845 *
2846 * FIXME: complete this.
2847 *
2848 * LOCKING:
2849 * Inherited from caller.
2850 */
Tejun Heo4447d352007-04-17 23:44:08 +09002851static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002852{
Tejun Heo4447d352007-04-17 23:44:08 +09002853 struct pci_dev *pdev = to_pci_dev(host->dev);
2854 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002855 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002856 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002857
2858 /* Use this to determine the HW stepping of the chip so we know
2859 * what errata to workaround
2860 */
Brett Russ31961942005-09-30 01:36:00 -04002861 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2862 if (scc == 0)
2863 scc_s = "SCSI";
2864 else if (scc == 0x01)
2865 scc_s = "RAID";
2866 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002867 scc_s = "?";
2868
2869 if (IS_GEN_I(hpriv))
2870 gen = "I";
2871 else if (IS_GEN_II(hpriv))
2872 gen = "II";
2873 else if (IS_GEN_IIE(hpriv))
2874 gen = "IIE";
2875 else
2876 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002877
Jeff Garzika9524a72005-10-30 14:39:11 -05002878 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002879 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2880 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002881 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2882}
2883
/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Probe sequence: allocate host + private data, enable the device,
 * map the primary BAR, configure DMA masks, create DMA pools,
 * initialize the adapter, then hook up the interrupt and activate.
 * All allocations/mappings use managed (devm_*/pcim_*) interfaces,
 * so early-return error paths need no explicit cleanup.
 *
 * LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	/* board type (chip_504x, chip_604x, ...) stashed in the ID table */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	/* announce the driver version once, on first probe only */
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* BAR is busy: keep the device enabled so the owner
		 * (e.g. a BIOS/other driver) is not disturbed */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	/* if MSI was requested but cannot be enabled, make sure legacy
	 * INTx is turned on instead */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	/* debug: dump part of the PCI config space */
	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);	/* best-effort; failure is ignored */
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002953#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002954
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

/*
 * Module entry point: register the PCI driver (when built with PCI
 * support) and the platform driver.  If the platform registration
 * fails, the already-registered PCI driver is unwound so the module
 * load fails cleanly.
 */
static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	/* unwind the PCI registration if the platform driver failed */
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}
2974
/*
 * Module exit: tear down both drivers registered by mv_init().  The
 * PCI driver exists only in CONFIG_PCI builds; the platform driver is
 * always registered.
 */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
2982
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
/* NOTE(review): this device table reference is outside the CONFIG_PCI
 * guard below — verify mv_pci_tbl is still defined in !CONFIG_PCI
 * builds, otherwise this line would not compile there. */
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);	/* enable platform-bus autoloading */

#ifdef CONFIG_PCI
/* see the "module options" declaration of 'msi' earlier in the file */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);