blob: b822b8a40c65cd6252543ed198e2c420eac9ff59 [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
24
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.

  7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/
66
Brett Russ20f733e2005-09-01 18:26:17 -040067#include <linux/kernel.h>
68#include <linux/module.h>
69#include <linux/pci.h>
70#include <linux/init.h>
71#include <linux/blkdev.h>
72#include <linux/delay.h>
73#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080074#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040075#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050076#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050077#include <linux/platform_device.h>
78#include <linux/ata_platform.h>
Brett Russ20f733e2005-09-01 18:26:17 -040079#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050080#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040081#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040082#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040083
84#define DRV_NAME "sata_mv"
Mark Lord1fd2e1c2008-01-26 18:33:59 -050085#define DRV_VERSION "1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040086
87enum {
88 /* BAR's are enumerated in terms of pci_resource_start() terms */
89 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
90 MV_IO_BAR = 2, /* offset 0x18: IO space */
91 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
92
93 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
94 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
95
96 MV_PCI_REG_BASE = 0,
97 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040098 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
99 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
100 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
101 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
102 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
103
Brett Russ20f733e2005-09-01 18:26:17 -0400104 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500105 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500106 MV_GPIO_PORT_CTL = 0x104f0,
107 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400108
109 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
110 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
111 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
112 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
113
Brett Russ31961942005-09-30 01:36:00 -0400114 MV_MAX_Q_DEPTH = 32,
115 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
116
117 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
118 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400119 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
120 */
121 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
122 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500123 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400124 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400125
Brett Russ20f733e2005-09-01 18:26:17 -0400126 MV_PORTS_PER_HC = 4,
127 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
128 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400129 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400130 MV_PORT_MASK = 3,
131
132 /* Host Flags */
133 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
134 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100135 /* SoC integrated controllers, no PCI interface */
Mark Lorde12bef52008-03-31 19:33:56 -0400136 MV_FLAG_SOC = (1 << 28),
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100137
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400138 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400139 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
140 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500141 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400142
Brett Russ31961942005-09-30 01:36:00 -0400143 CRQB_FLAG_READ = (1 << 0),
144 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400145 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
Mark Lorde12bef52008-03-31 19:33:56 -0400146 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400147 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400148 CRQB_CMD_ADDR_SHIFT = 8,
149 CRQB_CMD_CS = (0x2 << 11),
150 CRQB_CMD_LAST = (1 << 15),
151
152 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400153 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
154 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400155
156 EPRD_FLAG_END_OF_TBL = (1 << 31),
157
Brett Russ20f733e2005-09-01 18:26:17 -0400158 /* PCI interface registers */
159
Brett Russ31961942005-09-30 01:36:00 -0400160 PCI_COMMAND_OFS = 0xc00,
161
Brett Russ20f733e2005-09-01 18:26:17 -0400162 PCI_MAIN_CMD_STS_OFS = 0xd30,
163 STOP_PCI_MASTER = (1 << 2),
164 PCI_MASTER_EMPTY = (1 << 3),
165 GLOB_SFT_RST = (1 << 4),
166
Jeff Garzik522479f2005-11-12 22:14:02 -0500167 MV_PCI_MODE = 0xd00,
168 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
169 MV_PCI_DISC_TIMER = 0xd04,
170 MV_PCI_MSI_TRIGGER = 0xc38,
171 MV_PCI_SERR_MASK = 0xc28,
172 MV_PCI_XBAR_TMOUT = 0x1d04,
173 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
174 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
175 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
176 MV_PCI_ERR_COMMAND = 0x1d50,
177
Mark Lord02a121d2007-12-01 13:07:22 -0500178 PCI_IRQ_CAUSE_OFS = 0x1d58,
179 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400180 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
181
Mark Lord02a121d2007-12-01 13:07:22 -0500182 PCIE_IRQ_CAUSE_OFS = 0x1900,
183 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500184 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500185
Brett Russ20f733e2005-09-01 18:26:17 -0400186 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
187 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500188 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
189 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
Brett Russ20f733e2005-09-01 18:26:17 -0400190 PORT0_ERR = (1 << 0), /* shift by port # */
191 PORT0_DONE = (1 << 1), /* shift by port # */
192 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
193 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
194 PCI_ERR = (1 << 18),
195 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
196 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500197 PORTS_0_3_COAL_DONE = (1 << 8),
198 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400199 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
200 GPIO_INT = (1 << 22),
201 SELF_INT = (1 << 23),
202 TWSI_INT = (1 << 24),
203 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500204 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Mark Lorde12bef52008-03-31 19:33:56 -0400205 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500206 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400207 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
208 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500209 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
210 HC_MAIN_RSVD_5),
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500211 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
Brett Russ20f733e2005-09-01 18:26:17 -0400212
213 /* SATAHC registers */
214 HC_CFG_OFS = 0,
215
216 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400217 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400218 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
219 DEV_IRQ = (1 << 8), /* shift by port # */
220
221 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400222 SHD_BLK_OFS = 0x100,
223 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400224
225 /* SATA registers */
226 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
227 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500228 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Mark Lord17c5aab2008-04-16 14:56:51 -0400229
Mark Lorde12bef52008-03-31 19:33:56 -0400230 LTMODE_OFS = 0x30c,
Mark Lord17c5aab2008-04-16 14:56:51 -0400231 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
232
Jeff Garzik47c2b672005-11-12 21:13:17 -0500233 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500234 PHY_MODE4 = 0x314,
235 PHY_MODE2 = 0x330,
Mark Lorde12bef52008-03-31 19:33:56 -0400236 SATA_IFCTL_OFS = 0x344,
237 SATA_IFSTAT_OFS = 0x34c,
238 VENDOR_UNIQUE_FIS_OFS = 0x35c,
Mark Lord17c5aab2008-04-16 14:56:51 -0400239
Mark Lorde12bef52008-03-31 19:33:56 -0400240 FIS_CFG_OFS = 0x360,
Mark Lord17c5aab2008-04-16 14:56:51 -0400241 FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
242
Jeff Garzikc9d39132005-11-13 17:47:51 -0500243 MV5_PHY_MODE = 0x74,
244 MV5_LT_MODE = 0x30,
245 MV5_PHY_CTL = 0x0C,
Mark Lorde12bef52008-03-31 19:33:56 -0400246 SATA_INTERFACE_CFG = 0x050,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500247
248 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400249
250 /* Port registers */
251 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500252 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
253 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
254 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
255 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
256 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Mark Lorde12bef52008-03-31 19:33:56 -0400257 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
258 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
Brett Russ20f733e2005-09-01 18:26:17 -0400259
260 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
261 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400262 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
263 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
264 EDMA_ERR_DEV = (1 << 2), /* device error */
265 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
266 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
267 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400268 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
269 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400270 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400271 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400272 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
273 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
274 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
275 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500276
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400277 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500278 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
279 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
280 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
281 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
282
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400283 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500284
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400285 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500286 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
287 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
288 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
289 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
290 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
291
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400292 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500293
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400294 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400295 EDMA_ERR_OVERRUN_5 = (1 << 5),
296 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500297
298 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
299 EDMA_ERR_LNK_CTRL_RX_1 |
300 EDMA_ERR_LNK_CTRL_RX_3 |
Mark Lord40f0bc22008-04-16 14:57:25 -0400301 EDMA_ERR_LNK_CTRL_TX |
302 /* temporary, until we fix hotplug: */
303 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
Mark Lord646a4da2008-01-26 18:30:37 -0500304
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400305 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
306 EDMA_ERR_PRD_PAR |
307 EDMA_ERR_DEV_DCON |
308 EDMA_ERR_DEV_CON |
309 EDMA_ERR_SERR |
310 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400311 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400312 EDMA_ERR_CRPB_PAR |
313 EDMA_ERR_INTRL_PAR |
314 EDMA_ERR_IORDY |
315 EDMA_ERR_LNK_CTRL_RX_2 |
316 EDMA_ERR_LNK_DATA_RX |
317 EDMA_ERR_LNK_DATA_TX |
318 EDMA_ERR_TRANS_PROTO,
Mark Lorde12bef52008-03-31 19:33:56 -0400319
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400320 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
321 EDMA_ERR_PRD_PAR |
322 EDMA_ERR_DEV_DCON |
323 EDMA_ERR_DEV_CON |
324 EDMA_ERR_OVERRUN_5 |
325 EDMA_ERR_UNDERRUN_5 |
326 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400327 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400328 EDMA_ERR_CRPB_PAR |
329 EDMA_ERR_INTRL_PAR |
330 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400331
Brett Russ31961942005-09-30 01:36:00 -0400332 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
333 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400334
335 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
336 EDMA_REQ_Q_PTR_SHIFT = 5,
337
338 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
339 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
340 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400341 EDMA_RSP_Q_PTR_SHIFT = 3,
342
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400343 EDMA_CMD_OFS = 0x28, /* EDMA command register */
344 EDMA_EN = (1 << 0), /* enable EDMA */
345 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
346 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400347
Jeff Garzikc9d39132005-11-13 17:47:51 -0500348 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500349 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500350
Brett Russ31961942005-09-30 01:36:00 -0400351 /* Host private flags (hp_flags) */
352 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500353 MV_HP_ERRATA_50XXB0 = (1 << 1),
354 MV_HP_ERRATA_50XXB2 = (1 << 2),
355 MV_HP_ERRATA_60X1B2 = (1 << 3),
356 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500357 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400358 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
359 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
360 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500361 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400362
Brett Russ31961942005-09-30 01:36:00 -0400363 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400364 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500365 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Brett Russ31961942005-09-30 01:36:00 -0400366};
367
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400368#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
369#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500370#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100371#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500372
Jeff Garzik095fec82005-11-12 09:50:49 -0500373enum {
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400374 /* DMA boundary 0xffff is required by the s/g splitting
375 * we need on /length/ in mv_fill-sg().
376 */
377 MV_DMA_BOUNDARY = 0xffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500378
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400379 /* mask of register bits containing lower 32 bits
380 * of EDMA request queue DMA address
381 */
Jeff Garzik095fec82005-11-12 09:50:49 -0500382 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
383
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400384 /* ditto, for response queue */
Jeff Garzik095fec82005-11-12 09:50:49 -0500385 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
386};
387
Jeff Garzik522479f2005-11-12 22:14:02 -0500388enum chip_type {
389 chip_504x,
390 chip_508x,
391 chip_5080,
392 chip_604x,
393 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500394 chip_6042,
395 chip_7042,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500396 chip_soc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500397};
398
Brett Russ31961942005-09-30 01:36:00 -0400399/* Command ReQuest Block: 32B */
400struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400401 __le32 sg_addr;
402 __le32 sg_addr_hi;
403 __le16 ctrl_flags;
404 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400405};
406
Jeff Garzike4e7b892006-01-31 12:18:41 -0500407struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400408 __le32 addr;
409 __le32 addr_hi;
410 __le32 flags;
411 __le32 len;
412 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500413};
414
Brett Russ31961942005-09-30 01:36:00 -0400415/* Command ResPonse Block: 8B */
416struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400417 __le16 id;
418 __le16 flags;
419 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400420};
421
422/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
423struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400424 __le32 addr;
425 __le32 flags_size;
426 __le32 addr_hi;
427 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400428};
429
430struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400431 struct mv_crqb *crqb;
432 dma_addr_t crqb_dma;
433 struct mv_crpb *crpb;
434 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500435 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
436 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400437
438 unsigned int req_idx;
439 unsigned int resp_idx;
440
Brett Russ31961942005-09-30 01:36:00 -0400441 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400442};
443
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500444struct mv_port_signal {
445 u32 amps;
446 u32 pre;
447};
448
Mark Lord02a121d2007-12-01 13:07:22 -0500449struct mv_host_priv {
450 u32 hp_flags;
451 struct mv_port_signal signal[8];
452 const struct mv_hw_ops *ops;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500453 int n_ports;
454 void __iomem *base;
455 void __iomem *main_cause_reg_addr;
456 void __iomem *main_mask_reg_addr;
Mark Lord02a121d2007-12-01 13:07:22 -0500457 u32 irq_cause_ofs;
458 u32 irq_mask_ofs;
459 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500460 /*
461 * These consistent DMA memory pools give us guaranteed
462 * alignment for hardware-accessed data structures,
463 * and less memory waste in accomplishing the alignment.
464 */
465 struct dma_pool *crqb_pool;
466 struct dma_pool *crpb_pool;
467 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500468};
469
Jeff Garzik47c2b672005-11-12 21:13:17 -0500470struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500471 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
472 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500473 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
474 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
475 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500476 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
477 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500478 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100479 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500480};
481
Tejun Heoda3dbb12007-07-16 14:29:40 +0900482static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
483static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
484static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
485static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400486static int mv_port_start(struct ata_port *ap);
487static void mv_port_stop(struct ata_port *ap);
488static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500489static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900490static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Tejun Heoa1efdab2008-03-25 12:22:50 +0900491static int mv_hardreset(struct ata_link *link, unsigned int *class,
492 unsigned long deadline);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400493static void mv_eh_freeze(struct ata_port *ap);
494static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500495static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400496
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500497static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
498 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500499static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
500static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
501 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500502static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
503 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500504static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100505static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500506
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500507static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
508 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500509static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
510static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
511 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500512static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
513 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500514static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500515static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
516 void __iomem *mmio);
517static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
518 void __iomem *mmio);
519static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
520 void __iomem *mmio, unsigned int n_hc);
521static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
522 void __iomem *mmio);
523static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100524static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
Mark Lorde12bef52008-03-31 19:33:56 -0400525static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500526 unsigned int port_no);
Mark Lorde12bef52008-03-31 19:33:56 -0400527static int mv_stop_edma(struct ata_port *ap);
Mark Lordb5624682008-03-31 19:34:40 -0400528static int mv_stop_edma_engine(void __iomem *port_mmio);
Mark Lorde12bef52008-03-31 19:33:56 -0400529static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500530
Mark Lorde49856d2008-04-16 14:59:07 -0400531static void mv_pmp_select(struct ata_port *ap, int pmp);
532static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
533 unsigned long deadline);
534static int mv_softreset(struct ata_link *link, unsigned int *class,
535 unsigned long deadline);
536
Mark Lordeb73d552008-01-29 13:24:00 -0500537/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
538 * because we have to allow room for worst case splitting of
539 * PRDs for 64K boundaries in mv_fill_sg().
540 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400541static struct scsi_host_template mv5_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900542 ATA_BASE_SHT(DRV_NAME),
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400543 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400544 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400545};
546
547static struct scsi_host_template mv6_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900548 ATA_NCQ_SHT(DRV_NAME),
Mark Lord138bfdd2008-01-26 18:33:18 -0500549 .can_queue = MV_MAX_Q_DEPTH - 1,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400550 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400551 .dma_boundary = MV_DMA_BOUNDARY,
Brett Russ20f733e2005-09-01 18:26:17 -0400552};
553
Tejun Heo029cfd62008-03-25 12:22:49 +0900554static struct ata_port_operations mv5_ops = {
555 .inherits = &ata_sff_port_ops,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500556
Jeff Garzikc9d39132005-11-13 17:47:51 -0500557 .qc_prep = mv_qc_prep,
558 .qc_issue = mv_qc_issue,
559
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400560 .freeze = mv_eh_freeze,
561 .thaw = mv_eh_thaw,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900562 .hardreset = mv_hardreset,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900563 .error_handler = ata_std_error_handler, /* avoid SFF EH */
Tejun Heo029cfd62008-03-25 12:22:49 +0900564 .post_internal_cmd = ATA_OP_NULL,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400565
Jeff Garzikc9d39132005-11-13 17:47:51 -0500566 .scr_read = mv5_scr_read,
567 .scr_write = mv5_scr_write,
568
569 .port_start = mv_port_start,
570 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500571};
572
Tejun Heo029cfd62008-03-25 12:22:49 +0900573static struct ata_port_operations mv6_ops = {
574 .inherits = &mv5_ops,
Mark Lorde49856d2008-04-16 14:59:07 -0400575 .qc_defer = sata_pmp_qc_defer_cmd_switch,
Tejun Heo029cfd62008-03-25 12:22:49 +0900576 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400577 .scr_read = mv_scr_read,
578 .scr_write = mv_scr_write,
Mark Lorde49856d2008-04-16 14:59:07 -0400579
580 .pmp_hardreset = mv_pmp_hardreset,
581 .pmp_softreset = mv_softreset,
582 .softreset = mv_softreset,
583 .error_handler = sata_pmp_error_handler,
Brett Russ20f733e2005-09-01 18:26:17 -0400584};
585
Tejun Heo029cfd62008-03-25 12:22:49 +0900586static struct ata_port_operations mv_iie_ops = {
587 .inherits = &mv6_ops,
Mark Lorde49856d2008-04-16 14:59:07 -0400588 .qc_defer = ata_std_qc_defer, /* FIS-based switching */
Tejun Heo029cfd62008-03-25 12:22:49 +0900589 .dev_config = ATA_OP_NULL,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500590 .qc_prep = mv_qc_prep_iie,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500591};
592
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100593static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400594 { /* chip_504x */
Jeff Garzikcca39742006-08-24 03:19:22 -0400595 .flags = MV_COMMON_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400596 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400597 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500598 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400599 },
600 { /* chip_508x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400601 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400602 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400603 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500604 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400605 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500606 { /* chip_5080 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400607 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500608 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400609 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500610 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500611 },
Brett Russ20f733e2005-09-01 18:26:17 -0400612 { /* chip_604x */
Mark Lord138bfdd2008-01-26 18:33:18 -0500613 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
Mark Lorde49856d2008-04-16 14:59:07 -0400614 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
Mark Lord138bfdd2008-01-26 18:33:18 -0500615 ATA_FLAG_NCQ,
Brett Russ31961942005-09-30 01:36:00 -0400616 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400617 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500618 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400619 },
620 { /* chip_608x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400621 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
Mark Lorde49856d2008-04-16 14:59:07 -0400622 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
Mark Lord138bfdd2008-01-26 18:33:18 -0500623 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400624 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400625 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500626 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400627 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500628 { /* chip_6042 */
Mark Lord138bfdd2008-01-26 18:33:18 -0500629 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
Mark Lorde49856d2008-04-16 14:59:07 -0400630 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
Mark Lord138bfdd2008-01-26 18:33:18 -0500631 ATA_FLAG_NCQ,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500632 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400633 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500634 .port_ops = &mv_iie_ops,
635 },
636 { /* chip_7042 */
Mark Lord138bfdd2008-01-26 18:33:18 -0500637 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
Mark Lorde49856d2008-04-16 14:59:07 -0400638 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
Mark Lord138bfdd2008-01-26 18:33:18 -0500639 ATA_FLAG_NCQ,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500640 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400641 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500642 .port_ops = &mv_iie_ops,
643 },
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500644 { /* chip_soc */
Mark Lord02c1f322008-04-16 14:58:13 -0400645 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
Mark Lorde49856d2008-04-16 14:59:07 -0400646 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
Mark Lord02c1f322008-04-16 14:58:13 -0400647 ATA_FLAG_NCQ | MV_FLAG_SOC,
Mark Lord17c5aab2008-04-16 14:56:51 -0400648 .pio_mask = 0x1f, /* pio0-4 */
649 .udma_mask = ATA_UDMA6,
650 .port_ops = &mv_iie_ops,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500651 },
Brett Russ20f733e2005-09-01 18:26:17 -0400652};
653
Jeff Garzik3b7d6972005-11-10 11:04:11 -0500654static const struct pci_device_id mv_pci_tbl[] = {
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400655 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
656 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
657 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
658 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
Alan Coxcfbf7232007-07-09 14:38:41 +0100659 /* RocketRAID 1740/174x have different identifiers */
660 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
661 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
Brett Russ20f733e2005-09-01 18:26:17 -0400662
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400663 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
664 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
665 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
666 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
667 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
Jeff Garzik29179532005-11-11 08:08:03 -0500668
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400669 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
670
Florian Attenbergerd9f9c6b2007-07-02 17:09:29 +0200671 /* Adaptec 1430SA */
672 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
673
Mark Lord02a121d2007-12-01 13:07:22 -0500674 /* Marvell 7042 support */
Morrison, Tom6a3d5862007-03-06 02:38:10 -0800675 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
676
Mark Lord02a121d2007-12-01 13:07:22 -0500677 /* Highpoint RocketRAID PCIe series */
678 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
679 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
680
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400681 { } /* terminate list */
Brett Russ20f733e2005-09-01 18:26:17 -0400682};
683
Jeff Garzik47c2b672005-11-12 21:13:17 -0500684static const struct mv_hw_ops mv5xxx_ops = {
685 .phy_errata = mv5_phy_errata,
686 .enable_leds = mv5_enable_leds,
687 .read_preamp = mv5_read_preamp,
688 .reset_hc = mv5_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500689 .reset_flash = mv5_reset_flash,
690 .reset_bus = mv5_reset_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500691};
692
693static const struct mv_hw_ops mv6xxx_ops = {
694 .phy_errata = mv6_phy_errata,
695 .enable_leds = mv6_enable_leds,
696 .read_preamp = mv6_read_preamp,
697 .reset_hc = mv6_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500698 .reset_flash = mv6_reset_flash,
699 .reset_bus = mv_reset_pci_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500700};
701
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500702static const struct mv_hw_ops mv_soc_ops = {
703 .phy_errata = mv6_phy_errata,
704 .enable_leds = mv_soc_enable_leds,
705 .read_preamp = mv_soc_read_preamp,
706 .reset_hc = mv_soc_reset_hc,
707 .reset_flash = mv_soc_reset_flash,
708 .reset_bus = mv_soc_reset_bus,
709};
710
Brett Russ20f733e2005-09-01 18:26:17 -0400711/*
712 * Functions
713 */
714
715static inline void writelfl(unsigned long data, void __iomem *addr)
716{
717 writel(data, addr);
718 (void) readl(addr); /* flush to avoid PCI posted write */
719}
720
Brett Russ20f733e2005-09-01 18:26:17 -0400721static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
722{
723 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
724}
725
Jeff Garzikc9d39132005-11-13 17:47:51 -0500726static inline unsigned int mv_hc_from_port(unsigned int port)
727{
728 return port >> MV_PORT_HC_SHIFT;
729}
730
731static inline unsigned int mv_hardport_from_port(unsigned int port)
732{
733 return port & MV_PORT_MASK;
734}
735
736static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
737 unsigned int port)
738{
739 return mv_hc_base(base, mv_hc_from_port(port));
740}
741
Brett Russ20f733e2005-09-01 18:26:17 -0400742static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
743{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500744 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500745 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500746 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400747}
748
Mark Lorde12bef52008-03-31 19:33:56 -0400749static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
750{
751 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
752 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
753
754 return hc_mmio + ofs;
755}
756
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500757static inline void __iomem *mv_host_base(struct ata_host *host)
758{
759 struct mv_host_priv *hpriv = host->private_data;
760 return hpriv->base;
761}
762
Brett Russ20f733e2005-09-01 18:26:17 -0400763static inline void __iomem *mv_ap_base(struct ata_port *ap)
764{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500765 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400766}
767
Jeff Garzikcca39742006-08-24 03:19:22 -0400768static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400769{
Jeff Garzikcca39742006-08-24 03:19:22 -0400770 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400771}
772
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400773static void mv_set_edma_ptrs(void __iomem *port_mmio,
774 struct mv_host_priv *hpriv,
775 struct mv_port_priv *pp)
776{
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400777 u32 index;
778
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400779 /*
780 * initialize request queue
781 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400782 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
783
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400784 WARN_ON(pp->crqb_dma & 0x3ff);
785 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400786 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400787 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
788
789 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400790 writelfl((pp->crqb_dma & 0xffffffff) | index,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400791 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
792 else
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400793 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400794
795 /*
796 * initialize response queue
797 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400798 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
799
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400800 WARN_ON(pp->crpb_dma & 0xff);
801 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
802
803 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400804 writelfl((pp->crpb_dma & 0xffffffff) | index,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400805 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
806 else
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400807 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400808
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400809 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400810 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400811}
812
Brett Russ05b308e2005-10-05 17:08:53 -0400813/**
814 * mv_start_dma - Enable eDMA engine
815 * @base: port base address
816 * @pp: port private data
817 *
Tejun Heobeec7db2006-02-11 19:11:13 +0900818 * Verify the local cache of the eDMA state is accurate with a
819 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -0400820 *
821 * LOCKING:
822 * Inherited from caller.
823 */
Mark Lord0c589122008-01-26 18:31:16 -0500824static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
Mark Lord72109162008-01-26 18:31:33 -0500825 struct mv_port_priv *pp, u8 protocol)
Brett Russ31961942005-09-30 01:36:00 -0400826{
Mark Lord72109162008-01-26 18:31:33 -0500827 int want_ncq = (protocol == ATA_PROT_NCQ);
828
829 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
830 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
831 if (want_ncq != using_ncq)
Mark Lordb5624682008-03-31 19:34:40 -0400832 mv_stop_edma(ap);
Mark Lord72109162008-01-26 18:31:33 -0500833 }
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400834 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
Mark Lord0c589122008-01-26 18:31:16 -0500835 struct mv_host_priv *hpriv = ap->host->private_data;
836 int hard_port = mv_hardport_from_port(ap->port_no);
837 void __iomem *hc_mmio = mv_hc_base_from_port(
Saeed Bishara0fca0d62008-02-13 10:09:09 -1100838 mv_host_base(ap->host), hard_port);
Mark Lord0c589122008-01-26 18:31:16 -0500839 u32 hc_irq_cause, ipending;
840
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400841 /* clear EDMA event indicators, if any */
Mark Lordf630d562008-01-26 18:31:00 -0500842 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400843
Mark Lord0c589122008-01-26 18:31:16 -0500844 /* clear EDMA interrupt indicator, if any */
845 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
846 ipending = (DEV_IRQ << hard_port) |
847 (CRPB_DMA_DONE << hard_port);
848 if (hc_irq_cause & ipending) {
849 writelfl(hc_irq_cause & ~ipending,
850 hc_mmio + HC_IRQ_CAUSE_OFS);
851 }
852
Mark Lorde12bef52008-03-31 19:33:56 -0400853 mv_edma_cfg(ap, want_ncq);
Mark Lord0c589122008-01-26 18:31:16 -0500854
855 /* clear FIS IRQ Cause */
856 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
857
Mark Lordf630d562008-01-26 18:31:00 -0500858 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400859
Mark Lordf630d562008-01-26 18:31:00 -0500860 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
Brett Russafb0edd2005-10-05 17:08:42 -0400861 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
862 }
Mark Lordf630d562008-01-26 18:31:00 -0500863 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
Brett Russ31961942005-09-30 01:36:00 -0400864}
865
Brett Russ05b308e2005-10-05 17:08:53 -0400866/**
Mark Lorde12bef52008-03-31 19:33:56 -0400867 * mv_stop_edma_engine - Disable eDMA engine
Mark Lordb5624682008-03-31 19:34:40 -0400868 * @port_mmio: io base address
Brett Russ05b308e2005-10-05 17:08:53 -0400869 *
870 * LOCKING:
871 * Inherited from caller.
872 */
Mark Lordb5624682008-03-31 19:34:40 -0400873static int mv_stop_edma_engine(void __iomem *port_mmio)
Brett Russ31961942005-09-30 01:36:00 -0400874{
Mark Lordb5624682008-03-31 19:34:40 -0400875 int i;
Brett Russ31961942005-09-30 01:36:00 -0400876
Mark Lordb5624682008-03-31 19:34:40 -0400877 /* Disable eDMA. The disable bit auto clears. */
878 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
Jeff Garzik8b260242005-11-12 12:32:50 -0500879
Mark Lordb5624682008-03-31 19:34:40 -0400880 /* Wait for the chip to confirm eDMA is off. */
881 for (i = 10000; i > 0; i--) {
882 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
Jeff Garzik4537deb2007-07-12 14:30:19 -0400883 if (!(reg & EDMA_EN))
Mark Lordb5624682008-03-31 19:34:40 -0400884 return 0;
885 udelay(10);
Brett Russ31961942005-09-30 01:36:00 -0400886 }
Mark Lordb5624682008-03-31 19:34:40 -0400887 return -EIO;
Brett Russ31961942005-09-30 01:36:00 -0400888}
889
Mark Lorde12bef52008-03-31 19:33:56 -0400890static int mv_stop_edma(struct ata_port *ap)
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400891{
Mark Lordb5624682008-03-31 19:34:40 -0400892 void __iomem *port_mmio = mv_ap_base(ap);
893 struct mv_port_priv *pp = ap->private_data;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400894
Mark Lordb5624682008-03-31 19:34:40 -0400895 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
896 return 0;
897 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
898 if (mv_stop_edma_engine(port_mmio)) {
899 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
900 return -EIO;
901 }
902 return 0;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400903}
904
Jeff Garzik8a70f8d2005-10-05 17:19:47 -0400905#ifdef ATA_DEBUG
Brett Russ31961942005-09-30 01:36:00 -0400906static void mv_dump_mem(void __iomem *start, unsigned bytes)
907{
Brett Russ31961942005-09-30 01:36:00 -0400908 int b, w;
909 for (b = 0; b < bytes; ) {
910 DPRINTK("%p: ", start + b);
911 for (w = 0; b < bytes && w < 4; w++) {
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400912 printk("%08x ", readl(start + b));
Brett Russ31961942005-09-30 01:36:00 -0400913 b += sizeof(u32);
914 }
915 printk("\n");
916 }
Brett Russ31961942005-09-30 01:36:00 -0400917}
Jeff Garzik8a70f8d2005-10-05 17:19:47 -0400918#endif
919
Brett Russ31961942005-09-30 01:36:00 -0400920static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
921{
922#ifdef ATA_DEBUG
923 int b, w;
924 u32 dw;
925 for (b = 0; b < bytes; ) {
926 DPRINTK("%02x: ", b);
927 for (w = 0; b < bytes && w < 4; w++) {
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400928 (void) pci_read_config_dword(pdev, b, &dw);
929 printk("%08x ", dw);
Brett Russ31961942005-09-30 01:36:00 -0400930 b += sizeof(u32);
931 }
932 printk("\n");
933 }
934#endif
935}
936static void mv_dump_all_regs(void __iomem *mmio_base, int port,
937 struct pci_dev *pdev)
938{
939#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -0500940 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -0400941 port >> MV_PORT_HC_SHIFT);
942 void __iomem *port_base;
943 int start_port, num_ports, p, start_hc, num_hcs, hc;
944
945 if (0 > port) {
946 start_hc = start_port = 0;
947 num_ports = 8; /* shld be benign for 4 port devs */
948 num_hcs = 2;
949 } else {
950 start_hc = port >> MV_PORT_HC_SHIFT;
951 start_port = port;
952 num_ports = num_hcs = 1;
953 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500954 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -0400955 num_ports > 1 ? num_ports - 1 : start_port);
956
957 if (NULL != pdev) {
958 DPRINTK("PCI config space regs:\n");
959 mv_dump_pci_cfg(pdev, 0x68);
960 }
961 DPRINTK("PCI regs:\n");
962 mv_dump_mem(mmio_base+0xc00, 0x3c);
963 mv_dump_mem(mmio_base+0xd00, 0x34);
964 mv_dump_mem(mmio_base+0xf00, 0x4);
965 mv_dump_mem(mmio_base+0x1d00, 0x6c);
966 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -0700967 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -0400968 DPRINTK("HC regs (HC %i):\n", hc);
969 mv_dump_mem(hc_base, 0x1c);
970 }
971 for (p = start_port; p < start_port + num_ports; p++) {
972 port_base = mv_port_base(mmio_base, p);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400973 DPRINTK("EDMA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -0400974 mv_dump_mem(port_base, 0x54);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400975 DPRINTK("SATA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -0400976 mv_dump_mem(port_base+0x300, 0x60);
977 }
978#endif
979}
980
Brett Russ20f733e2005-09-01 18:26:17 -0400981static unsigned int mv_scr_offset(unsigned int sc_reg_in)
982{
983 unsigned int ofs;
984
985 switch (sc_reg_in) {
986 case SCR_STATUS:
987 case SCR_CONTROL:
988 case SCR_ERROR:
989 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
990 break;
991 case SCR_ACTIVE:
992 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
993 break;
994 default:
995 ofs = 0xffffffffU;
996 break;
997 }
998 return ofs;
999}
1000
Tejun Heoda3dbb12007-07-16 14:29:40 +09001001static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001002{
1003 unsigned int ofs = mv_scr_offset(sc_reg_in);
1004
Tejun Heoda3dbb12007-07-16 14:29:40 +09001005 if (ofs != 0xffffffffU) {
1006 *val = readl(mv_ap_base(ap) + ofs);
1007 return 0;
1008 } else
1009 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001010}
1011
Tejun Heoda3dbb12007-07-16 14:29:40 +09001012static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001013{
1014 unsigned int ofs = mv_scr_offset(sc_reg_in);
1015
Tejun Heoda3dbb12007-07-16 14:29:40 +09001016 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001017 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001018 return 0;
1019 } else
1020 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001021}
1022
Mark Lordf2738272008-01-26 18:32:29 -05001023static void mv6_dev_config(struct ata_device *adev)
1024{
1025 /*
Mark Lorde49856d2008-04-16 14:59:07 -04001026 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1027 *
1028 * Gen-II does not support NCQ over a port multiplier
1029 * (no FIS-based switching).
1030 *
Mark Lordf2738272008-01-26 18:32:29 -05001031 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1032 * See mv_qc_prep() for more info.
1033 */
Mark Lorde49856d2008-04-16 14:59:07 -04001034 if (adev->flags & ATA_DFLAG_NCQ) {
1035 if (sata_pmp_attached(adev->link->ap))
1036 adev->flags &= ~ATA_DFLAG_NCQ;
1037 else if (adev->max_sectors > ATA_MAX_SECTORS)
Mark Lordf2738272008-01-26 18:32:29 -05001038 adev->max_sectors = ATA_MAX_SECTORS;
Mark Lorde49856d2008-04-16 14:59:07 -04001039 }
1040}
1041
1042static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
1043{
1044 u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
1045 /*
1046 * Various bit settings required for operation
1047 * in FIS-based switching (fbs) mode on GenIIe:
1048 */
1049 old_fcfg = readl(port_mmio + FIS_CFG_OFS);
1050 old_ltmode = readl(port_mmio + LTMODE_OFS);
1051 if (enable_fbs) {
1052 new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
1053 new_ltmode = old_ltmode | LTMODE_BIT8;
1054 } else { /* disable fbs */
1055 new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
1056 new_ltmode = old_ltmode & ~LTMODE_BIT8;
1057 }
1058 if (new_fcfg != old_fcfg)
1059 writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
1060 if (new_ltmode != old_ltmode)
1061 writelfl(new_ltmode, port_mmio + LTMODE_OFS);
Mark Lordf2738272008-01-26 18:32:29 -05001062}
1063
Mark Lorde12bef52008-03-31 19:33:56 -04001064static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
Jeff Garzike4e7b892006-01-31 12:18:41 -05001065{
Mark Lord0c589122008-01-26 18:31:16 -05001066 u32 cfg;
Mark Lorde12bef52008-03-31 19:33:56 -04001067 struct mv_port_priv *pp = ap->private_data;
1068 struct mv_host_priv *hpriv = ap->host->private_data;
1069 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001070
1071 /* set up non-NCQ EDMA configuration */
Mark Lord0c589122008-01-26 18:31:16 -05001072 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001073
Mark Lord0c589122008-01-26 18:31:16 -05001074 if (IS_GEN_I(hpriv))
Jeff Garzike4e7b892006-01-31 12:18:41 -05001075 cfg |= (1 << 8); /* enab config burst size mask */
1076
Mark Lord0c589122008-01-26 18:31:16 -05001077 else if (IS_GEN_II(hpriv))
Jeff Garzike4e7b892006-01-31 12:18:41 -05001078 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1079
1080 else if (IS_GEN_IIE(hpriv)) {
Jeff Garzike728eab2007-02-25 02:53:41 -05001081 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1082 cfg |= (1 << 22); /* enab 4-entry host queue cache */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001083 cfg |= (1 << 18); /* enab early completion */
Jeff Garzike728eab2007-02-25 02:53:41 -05001084 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
Mark Lorde49856d2008-04-16 14:59:07 -04001085
1086 if (want_ncq && sata_pmp_attached(ap)) {
1087 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1088 mv_config_fbs(port_mmio, 1);
1089 } else {
1090 mv_config_fbs(port_mmio, 0);
1091 }
Jeff Garzike4e7b892006-01-31 12:18:41 -05001092 }
1093
Mark Lord72109162008-01-26 18:31:33 -05001094 if (want_ncq) {
1095 cfg |= EDMA_CFG_NCQ;
1096 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1097 } else
1098 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1099
Jeff Garzike4e7b892006-01-31 12:18:41 -05001100 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1101}
1102
Mark Lordda2fa9b2008-01-26 18:32:45 -05001103static void mv_port_free_dma_mem(struct ata_port *ap)
1104{
1105 struct mv_host_priv *hpriv = ap->host->private_data;
1106 struct mv_port_priv *pp = ap->private_data;
Mark Lordeb73d552008-01-29 13:24:00 -05001107 int tag;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001108
1109 if (pp->crqb) {
1110 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1111 pp->crqb = NULL;
1112 }
1113 if (pp->crpb) {
1114 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1115 pp->crpb = NULL;
1116 }
Mark Lordeb73d552008-01-29 13:24:00 -05001117 /*
1118 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1119 * For later hardware, we have one unique sg_tbl per NCQ tag.
1120 */
1121 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1122 if (pp->sg_tbl[tag]) {
1123 if (tag == 0 || !IS_GEN_I(hpriv))
1124 dma_pool_free(hpriv->sg_tbl_pool,
1125 pp->sg_tbl[tag],
1126 pp->sg_tbl_dma[tag]);
1127 pp->sg_tbl[tag] = NULL;
1128 }
Mark Lordda2fa9b2008-01-26 18:32:45 -05001129 }
1130}
1131
Brett Russ05b308e2005-10-05 17:08:53 -04001132/**
1133 * mv_port_start - Port specific init/start routine.
1134 * @ap: ATA channel to manipulate
1135 *
1136 * Allocate and point to DMA memory, init port private memory,
1137 * zero indices.
1138 *
1139 * LOCKING:
1140 * Inherited from caller.
1141 */
Brett Russ31961942005-09-30 01:36:00 -04001142static int mv_port_start(struct ata_port *ap)
1143{
Jeff Garzikcca39742006-08-24 03:19:22 -04001144 struct device *dev = ap->host->dev;
1145 struct mv_host_priv *hpriv = ap->host->private_data;
Brett Russ31961942005-09-30 01:36:00 -04001146 struct mv_port_priv *pp;
1147 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001148 unsigned long flags;
James Bottomleydde20202008-02-19 11:36:56 +01001149 int tag;
Brett Russ31961942005-09-30 01:36:00 -04001150
Tejun Heo24dc5f32007-01-20 16:00:28 +09001151 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
Jeff Garzik6037d6b2005-11-04 22:08:00 -05001152 if (!pp)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001153 return -ENOMEM;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001154 ap->private_data = pp;
Brett Russ31961942005-09-30 01:36:00 -04001155
Mark Lordda2fa9b2008-01-26 18:32:45 -05001156 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1157 if (!pp->crqb)
1158 return -ENOMEM;
1159 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
Brett Russ31961942005-09-30 01:36:00 -04001160
Mark Lordda2fa9b2008-01-26 18:32:45 -05001161 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1162 if (!pp->crpb)
1163 goto out_port_free_dma_mem;
1164 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
Brett Russ31961942005-09-30 01:36:00 -04001165
Mark Lordeb73d552008-01-29 13:24:00 -05001166 /*
1167 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1168 * For later hardware, we need one unique sg_tbl per NCQ tag.
1169 */
1170 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1171 if (tag == 0 || !IS_GEN_I(hpriv)) {
1172 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1173 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1174 if (!pp->sg_tbl[tag])
1175 goto out_port_free_dma_mem;
1176 } else {
1177 pp->sg_tbl[tag] = pp->sg_tbl[0];
1178 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1179 }
1180 }
Brett Russ31961942005-09-30 01:36:00 -04001181
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001182 spin_lock_irqsave(&ap->host->lock, flags);
1183
Mark Lorde12bef52008-03-31 19:33:56 -04001184 mv_edma_cfg(ap, 0);
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001185 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Brett Russ31961942005-09-30 01:36:00 -04001186
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001187 spin_unlock_irqrestore(&ap->host->lock, flags);
1188
Brett Russ31961942005-09-30 01:36:00 -04001189 /* Don't turn on EDMA here...do it before DMA commands only. Else
1190 * we'll be unable to send non-data, PIO, etc due to restricted access
1191 * to shadow regs.
1192 */
Brett Russ31961942005-09-30 01:36:00 -04001193 return 0;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001194
1195out_port_free_dma_mem:
1196 mv_port_free_dma_mem(ap);
1197 return -ENOMEM;
Brett Russ31961942005-09-30 01:36:00 -04001198}
1199
Brett Russ05b308e2005-10-05 17:08:53 -04001200/**
1201 * mv_port_stop - Port specific cleanup/stop routine.
1202 * @ap: ATA channel to manipulate
1203 *
1204 * Stop DMA, cleanup port memory.
1205 *
1206 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001207 * This routine uses the host lock to protect the DMA stop.
Brett Russ05b308e2005-10-05 17:08:53 -04001208 */
Brett Russ31961942005-09-30 01:36:00 -04001209static void mv_port_stop(struct ata_port *ap)
1210{
Mark Lorde12bef52008-03-31 19:33:56 -04001211 mv_stop_edma(ap);
Mark Lordda2fa9b2008-01-26 18:32:45 -05001212 mv_port_free_dma_mem(ap);
Brett Russ31961942005-09-30 01:36:00 -04001213}
1214
Brett Russ05b308e2005-10-05 17:08:53 -04001215/**
1216 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1217 * @qc: queued command whose SG list to source from
1218 *
1219 * Populate the SG list and mark the last entry.
1220 *
1221 * LOCKING:
1222 * Inherited from caller.
1223 */
Jeff Garzik6c087722007-10-12 00:16:23 -04001224static void mv_fill_sg(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001225{
1226 struct mv_port_priv *pp = qc->ap->private_data;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001227 struct scatterlist *sg;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001228 struct mv_sg *mv_sg, *last_sg = NULL;
Tejun Heoff2aeb12007-12-05 16:43:11 +09001229 unsigned int si;
Brett Russ31961942005-09-30 01:36:00 -04001230
Mark Lordeb73d552008-01-29 13:24:00 -05001231 mv_sg = pp->sg_tbl[qc->tag];
Tejun Heoff2aeb12007-12-05 16:43:11 +09001232 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Jeff Garzikd88184f2007-02-26 01:26:06 -05001233 dma_addr_t addr = sg_dma_address(sg);
1234 u32 sg_len = sg_dma_len(sg);
Brett Russ31961942005-09-30 01:36:00 -04001235
Olof Johansson4007b492007-10-02 20:45:27 -05001236 while (sg_len) {
1237 u32 offset = addr & 0xffff;
1238 u32 len = sg_len;
Brett Russ31961942005-09-30 01:36:00 -04001239
Olof Johansson4007b492007-10-02 20:45:27 -05001240 if ((offset + sg_len > 0x10000))
1241 len = 0x10000 - offset;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001242
Olof Johansson4007b492007-10-02 20:45:27 -05001243 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1244 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
Jeff Garzik6c087722007-10-12 00:16:23 -04001245 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
Olof Johansson4007b492007-10-02 20:45:27 -05001246
1247 sg_len -= len;
1248 addr += len;
1249
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001250 last_sg = mv_sg;
Olof Johansson4007b492007-10-02 20:45:27 -05001251 mv_sg++;
Olof Johansson4007b492007-10-02 20:45:27 -05001252 }
Brett Russ31961942005-09-30 01:36:00 -04001253 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001254
1255 if (likely(last_sg))
1256 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
Brett Russ31961942005-09-30 01:36:00 -04001257}
1258
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001259static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001260{
Mark Lord559eeda2006-05-19 16:40:15 -04001261 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001262 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001263 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001264}
1265
Brett Russ05b308e2005-10-05 17:08:53 -04001266/**
1267 * mv_qc_prep - Host specific command preparation.
1268 * @qc: queued command to prepare
1269 *
1270 * This routine simply redirects to the general purpose routine
1271 * if command is not DMA. Else, it handles prep of the CRQB
1272 * (command request block), does some sanity checking, and calls
1273 * the SG load routine.
1274 *
1275 * LOCKING:
1276 * Inherited from caller.
1277 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* EDMA prep applies only to DMA/NCQ; other protocols use SFF paths */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point this CRQB slot at the per-tag S/G table (split 64-bit DMA addr) */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining taskfile registers, packed in CRQB order; final one flagged */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1356
1357/**
1358 * mv_qc_prep_iie - Host specific command preparation.
1359 * @qc: queued command to prepare
1360 *
1361 * This routine simply redirects to the general purpose routine
1362 * if command is not DMA. Else, it handles prep of the CRQB
1363 * (command request block), does some sanity checking, and calls
1364 * the SG load routine.
1365 *
1366 * LOCKING:
1367 * Inherited from caller.
1368 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* EDMA prep applies only to DMA/NCQ; other protocols use SFF paths */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	/* Gen IIE carries the tag in the host-queue field as well */
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* Gen IIE CRQB: S/G table address (split 64-bit), flags, then the
	 * full taskfile packed into four 32-bit words.
	 */
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1425
Brett Russ05b308e2005-10-05 17:08:53 -04001426/**
1427 * mv_qc_issue - Initiate a command to the host
1428 * @qc: queued command to start
1429 *
1430 * This routine simply redirects to the general purpose routine
1431 * if command is not DMA. Else, it sanity checks our local
1432 * caches of the request producer/consumer indices then enables
1433 * DMA and bumps the request producer index.
1434 *
1435 * LOCKING:
1436 * Inherited from caller.
1437 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	/* DMA/NCQ path: make sure EDMA is running in the right mode */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* advance software request-queue producer index */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1469
Brett Russ05b308e2005-10-05 17:08:53 -04001470/**
Brett Russ05b308e2005-10-05 17:08:53 -04001471 * mv_err_intr - Handle error interrupts on the port
1472 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001473 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001474 *
1475 * In most cases, just clear the interrupt and move on. However,
Mark Lorde12bef52008-03-31 19:33:56 -04001476 * some cases require an eDMA reset, which also performs a COMRESET.
1477 * The SERR case requires a clear of pending errors in the SATA
1478 * SERROR register. Finally, if the port disabled DMA,
1479 * update our cached copy to match.
Brett Russ05b308e2005-10-05 17:08:53 -04001480 *
1481 * LOCKING:
1482 * Inherited from caller.
1483 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001484static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001485{
Brett Russ31961942005-09-30 01:36:00 -04001486 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001487 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1488 struct mv_port_priv *pp = ap->private_data;
1489 struct mv_host_priv *hpriv = ap->host->private_data;
1490 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1491 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001492 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001493
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001494 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001495
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001496 if (!edma_enabled) {
1497 /* just a guess: do we need to do this? should we
1498 * expand this, and do it in all cases?
1499 */
Tejun Heo936fd732007-08-06 18:36:23 +09001500 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1501 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001502 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001503
1504 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1505
1506 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1507
1508 /*
1509 * all generations share these EDMA error cause bits
1510 */
1511
1512 if (edma_err_cause & EDMA_ERR_DEV)
1513 err_mask |= AC_ERR_DEV;
1514 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001515 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001516 EDMA_ERR_INTRL_PAR)) {
1517 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001518 action |= ATA_EH_RESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001519 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001520 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001521 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1522 ata_ehi_hotplugged(ehi);
1523 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001524 "dev disconnect" : "dev connect");
Tejun Heocf480622008-01-24 00:05:14 +09001525 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001526 }
1527
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001528 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001529 eh_freeze_mask = EDMA_EH_FREEZE_5;
1530
1531 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001532 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001533 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001534 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001535 }
1536 } else {
1537 eh_freeze_mask = EDMA_EH_FREEZE;
1538
1539 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001540 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001541 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001542 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001543 }
1544
1545 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001546 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1547 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001548 err_mask = AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001549 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001550 }
1551 }
Brett Russ20f733e2005-09-01 18:26:17 -04001552
1553 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001554 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001555
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001556 if (!err_mask) {
1557 err_mask = AC_ERR_OTHER;
Tejun Heocf480622008-01-24 00:05:14 +09001558 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001559 }
1560
1561 ehi->serror |= serr;
1562 ehi->action |= action;
1563
1564 if (qc)
1565 qc->err_mask |= err_mask;
1566 else
1567 ehi->err_mask |= err_mask;
1568
1569 if (edma_err_cause & eh_freeze_mask)
1570 ata_port_freeze(ap);
1571 else
1572 ata_port_abort(ap);
1573}
1574
/* Handle a PIO-mode device interrupt: complete the active non-polled
 * command with the status read from the shadow status register.
 */
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
1596
/* Drain the EDMA response queue: complete every command the hardware has
 * responded to since our last-read pointer, then (once, after the loop)
 * write the out-pointer back so the hardware knows the entries were
 * consumed.  Bails out into mv_err_intr() on a response with error status.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1662
Brett Russ05b308e2005-10-05 17:08:53 -04001663/**
1664 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001665 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001666 * @relevant: port error bits relevant to this host controller
1667 * @hc: which host controller we're to look at
1668 *
1669 * Read then write clear the HC interrupt status then walk each
1670 * port connected to the HC and see if it needs servicing. Port
1671 * success ints are reported in the HC interrupt status reg, the
1672 * port error ints are reported in the higher level main
1673 * interrupt status register and thus are passed in via the
1674 * 'relevant' argument.
1675 *
1676 * LOCKING:
1677 * Inherited from caller.
1678 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	/* HC0 serves the first MV_PORTS_PER_HC ports, HC1 the next group */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* ack (write-to-clear) everything we are about to service */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		/* error bits for this port arrive via the main cause reg */
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port);	/* range 0..3 */

		/* success path: EDMA completion vs. legacy device IRQ */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1745
/* Handle a host-level PCI error: log and clear the PCI IRQ cause, then
 * freeze every online port so libata EH recovers them.  The cause is
 * attached (once) to the first port's EH descriptor.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* write-to-clear the latched PCI error cause */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1785
Brett Russ05b308e2005-10-05 17:08:53 -04001786/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001787 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001788 * @irq: unused
1789 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001790 *
1791 * Read the read only register to determine if any host
1792 * controllers have pending interrupts. If so, call lower level
1793 * routine to handle. Also check for PCI errors which are only
1794 * reported here.
1795 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001796 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001797 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001798 * interrupts.
1799 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* PCI errors preempt all per-HC servicing */
	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each host controller's pending bits to mv_host_intr() */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1841
Jeff Garzikc9d39132005-11-13 17:47:51 -05001842static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1843{
1844 unsigned int ofs;
1845
1846 switch (sc_reg_in) {
1847 case SCR_STATUS:
1848 case SCR_ERROR:
1849 case SCR_CONTROL:
1850 ofs = sc_reg_in * sizeof(u32);
1851 break;
1852 default:
1853 ofs = 0xffffffffU;
1854 break;
1855 }
1856 return ofs;
1857}
1858
Tejun Heoda3dbb12007-07-16 14:29:40 +09001859static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001860{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001861 struct mv_host_priv *hpriv = ap->host->private_data;
1862 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001863 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001864 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1865
Tejun Heoda3dbb12007-07-16 14:29:40 +09001866 if (ofs != 0xffffffffU) {
1867 *val = readl(addr + ofs);
1868 return 0;
1869 } else
1870 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001871}
1872
Tejun Heoda3dbb12007-07-16 14:29:40 +09001873static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001874{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001875 struct mv_host_priv *hpriv = ap->host->private_data;
1876 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001877 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001878 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1879
Tejun Heoda3dbb12007-07-16 14:29:40 +09001880 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001881 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001882 return 0;
1883 } else
1884 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001885}
1886
/* 50xx bus reset: set the PCI ROM BAR control bit (not present on rev-0
 * 5080 parts) and then perform the common PCI bus reset.
 */
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	/* rev-0 5080 parts lack the ROM BAR control bit below */
	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}
1902
/* Reset the flash controller interface on 50xx parts. */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1907
/* Cache the 50xx PHY pre-emphasis and amplitude settings for port @idx
 * so later errata fixups (mv5_phy_errata) can restore them.
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1919
/* Enable activity LEDs on 50xx parts via the GPIO port control and the
 * PCI expansion-ROM BAR control registers.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/*
	 * NOTE(review): "tmp |= ~(1 << 0)" sets every bit EXCEPT bit 0,
	 * which looks suspicious next to mv5_reset_bus()'s "tmp |= (1 << 0)".
	 * Possibly "tmp &= ~(1 << 0)" (clear bit 0) was intended — verify
	 * against the Marvell 50xx datasheet before changing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1932
/* Apply 50xx per-port PHY errata fixes and restore the cached
 * pre-emphasis/amplitude signal settings saved by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* bits replaced below with the cached pre/amps values */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 errata: adjust LT mode and PHY control */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* re-apply the saved signal amplitude/pre-emphasis settings */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1958
Jeff Garzikc9d39132005-11-13 17:47:51 -05001959
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Bring one 50xx port to a clean state: reset the channel, then zero all
 * EDMA queue/IRQ registers and program the config and IORDY timeout.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1989
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host-controller block: zero its IRQ registers and
 * reprogram the timing fields at offset 0x20.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
2008
2009static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2010 unsigned int n_hc)
2011{
2012 unsigned int hc, port;
2013
2014 for (hc = 0; hc < n_hc; hc++) {
2015 for (port = 0; port < MV_PORTS_PER_HC; port++)
2016 mv5_reset_hc_port(hpriv, mmio,
2017 (hc * MV_PORTS_PER_HC) + port);
2018
2019 mv5_reset_one_hc(hpriv, mmio, hc);
2020 }
2021
2022 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002023}
2024
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/*
 * mv_reset_pci_bus - reset the PCI-side registers of the controller.
 * @host: ATA host (provides hpriv with the per-chip irq register offsets)
 * @mmio: base address of the HBA registers
 *
 * Clears PCI error/interrupt state, the main IRQ mask, and reprograms
 * the PCI mode and crossbar timeout registers.  Uses the PCI-vs-PCIe
 * irq cause/mask offsets stashed in hpriv by mv_chip_id().
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	/* clear byte 2 of MV_PCI_MODE (magic, per the vendor driver) */
	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
2049
/*
 * mv6_reset_flash - 60xx flash/GPIO setup after reset.
 * @hpriv: host private data
 * @mmio: base address of the HBA registers
 *
 * Performs the common 50xx flash reset, then keeps only bits 1:0 of the
 * GPIO port control register and sets bits 5 and 6 (magic values taken
 * from the vendor driver).
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2061
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @hpriv: host private data (unused here, kept for the reset_hc signature)
 * @mmio: base address of the HBA
 * @n_hc: number of host controllers (unused here)
 *
 * This routine only applies to 6xxx parts.
 *
 * Returns 0 on success, 1 if the PCI master would not flush or the
 * global soft reset bit could not be set or cleared.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset (retry up to 5 times, 1us apart) */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2125
/*
 * mv6_read_preamp - save a port's PHY signal amplitude / pre-emphasis.
 * @hpriv: host private data; results stored in hpriv->signal[idx]
 * @idx: port index
 * @mmio: base address of the HBA registers
 *
 * If bit 0 of MV_RESET_CFG is clear, fixed default values are used;
 * otherwise the values left in PHY_MODE2 (presumably by firmware/BIOS)
 * are captured.  The saved values are restored by mv6_phy_errata().
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
2145
/*
 * mv6_enable_leds - LED setup for 60xx: writes 0x60 (bits 5 and 6, same
 * bits mv6_reset_flash sets) to the GPIO port control register.  Value
 * is a magic constant from the vendor driver.
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2150
/*
 * mv6_phy_errata - apply 60xx PHY errata workarounds to one port.
 * @hpriv: host private data (hp_flags select which workarounds apply)
 * @mmio: base address of the HBA registers
 * @port: port number
 *
 * Applies the PHY_MODE2/3/4 fixups required for the 60X1B2 and 60X1C0
 * steppings, then restores the pre-emphasis/amplitude values saved by
 * mv6_read_preamp().  Register values and ordering follow the vendor
 * driver; do not reorder the writes.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then clear both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* on 60X1B2, writing PHY_MODE4 clobbers PHY_MODE3: save it */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		/* ... and restore PHY_MODE3 afterwards */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2217
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002218/* TODO: use the generic LED interface to configure the SATA Presence */
2219/* & Acitivy LEDs on the board */
2220static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2221 void __iomem *mmio)
2222{
2223 return;
2224}
2225
2226static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2227 void __iomem *mmio)
2228{
2229 void __iomem *port_mmio;
2230 u32 tmp;
2231
2232 port_mmio = mv_port_base(mmio, idx);
2233 tmp = readl(port_mmio + PHY_MODE2);
2234
2235 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2236 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2237}
2238
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * mv_soc_reset_hc_port - bring one SoC port's EDMA registers to a known state.
 * @hpriv: host private data (needed by mv_reset_channel)
 * @mmio: base address of the HBA registers
 * @port: port number to reset
 *
 * SoC analogue of mv5_reset_hc_port(): resets the channel, zeroes the
 * port's EDMA registers, and programs the SoC EDMA config (0x101f, vs
 * 0x11f on 50xx) and the IORDY timeout.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);	/* SoC EDMA config value */
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);	/* IORDY timeout value */
}

#undef ZERO
2269
#define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * mv_soc_reset_one_hc - reset the single SoC host controller (HC 0).
 *
 * Zeroes three per-HC registers.
 * NOTE(review): unlike mv5_reset_one_hc(), offset 0x018 is not cleared
 * and there is no magic write to offset 0x20 here — presumably the SoC
 * variant doesn't need them; confirm against the SoC datasheet.
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				      void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}

#undef ZERO
2283
2284static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2285 void __iomem *mmio, unsigned int n_hc)
2286{
2287 unsigned int port;
2288
2289 for (port = 0; port < hpriv->n_ports; port++)
2290 mv_soc_reset_hc_port(hpriv, mmio, port);
2291
2292 mv_soc_reset_one_hc(hpriv, mmio);
2293
2294 return 0;
2295}
2296
2297static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2298 void __iomem *mmio)
2299{
2300 return;
2301}
2302
2303static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2304{
2305 return;
2306}
2307
/*
 * mv_setup_ifctl - program a port's SATA_INTERFACE_CFG register.
 * @port_mmio: port register base
 * @want_gen2i: nonzero to enable gen2i (3.0 Gb/s) signalling (bit 7),
 *	zero to leave the port at gen1 speed
 *
 * The mask/value pair (0xf7f / 0x9b1000) comes from the chip spec.
 * Uses writelfl so the write is flushed before returning.
 */
static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);

	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifctl |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
}
2317
/*
 * Caller must ensure that EDMA is not active,
 * by first doing mv_stop_edma() where needed.
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/* belt-and-suspenders: force the EDMA engine off before ATA_RST */
	mv_stop_edma_engine(port_mmio);
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed */
		mv_setup_ifctl(port_mmio, 1);
	}
	/*
	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	/* re-apply chip-specific PHY errata fixups after the reset */
	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* Gen-I chips need extra settle time after reset */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2348
Mark Lorde49856d2008-04-16 14:59:07 -04002349static void mv_pmp_select(struct ata_port *ap, int pmp)
2350{
2351 if (sata_pmp_supported(ap)) {
2352 void __iomem *port_mmio = mv_ap_base(ap);
2353 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2354 int old = reg & 0xf;
2355
2356 if (old != pmp) {
2357 reg = (reg & ~0xf) | pmp;
2358 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
2359 }
2360 }
2361}
2362
/*
 * mv_pmp_hardreset - hardreset one link, selecting its PMP port first.
 * Returns whatever sata_std_hardreset() returns.
 */
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}
2369
/*
 * mv_softreset - softreset one link, selecting its PMP port first.
 * Returns whatever ata_sff_softreset() returns.
 */
static int mv_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}
2376
/*
 * mv_hardreset - chip-level hardreset with errata FEr SATA#10 handling.
 * @link: link to reset
 * @class: out: device class from the reset
 * @deadline: time limit for the reset
 *
 * Resets the channel (disabling EDMA), then retries the hardreset until
 * SStatus settles at 0x0 (no device), 0x113, or 0x123 (link up).  After
 * five failed attempts with SStatus stuck at 0x121, drops the interface
 * to 1.5 Gb/s and extends the deadline once, per the errata workaround.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;	/* channel reset killed EDMA */

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
			sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		/*
		 * NOTE(review): the return value of sata_scr_read() is not
		 * checked; if it failed, sstatus would be used uninitialized
		 * on the first pass — confirm SCR reads cannot fail here.
		 */
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifctl(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}
2411
/*
 * mv_eh_freeze - mask this port's interrupts in the main IRQ mask register.
 *
 * Each port owns two adjacent bits ("done" and "err") in the main mask;
 * ports on the second HC (port_no > 3) are offset by one extra bit.
 */
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	/* compute this port's bit position in the main mask register */
	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}
2431
/*
 * mv_eh_thaw - clear stale port interrupt state and re-enable interrupts.
 *
 * Reverses mv_eh_freeze(): clears pending EDMA errors and HC-level irq
 * causes for this port, then unmasks its "done"/"err" bits in the main
 * IRQ mask register (same bit layout as in mv_eh_freeze).
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* bit position in the main mask; port index within its own HC */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2465
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: taskfile shadow registers live at
	 * shd_base + 4*ATA_REG_xxx
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2514
/*
 * mv_chip_id - identify the chip variant and record errata/feature flags.
 * @host: ATA host being probed
 * @board_idx: board table index (chip_504x .. chip_soc)
 *
 * Selects the hw-ops vtable (hpriv->ops), sets generation and errata
 * bits in hpriv->hp_flags based on board index and PCI revision, and
 * records the PCI-vs-PCIe irq cause/mask register offsets.
 *
 * NOTE(review): for chip_soc, host->dev is not a PCI device, so pdev is
 * a bogus pointer from to_pci_dev() — it is never dereferenced on that
 * path, but confirm this stays true if the switch is reorganized.
 *
 * Returns 0 on success, 1 on an unknown board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: 7042 is the PCIe flavour of the 6042 */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts keep their irq cause/mask registers at different offsets */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2654
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	/* main IRQ cause/mask registers sit at different offsets on PCI vs SoC */
	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask: silence everything until setup is done */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* capture per-port PHY settings before the reset clobbers them */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
2758
/*
 * mv_create_dma_pools - create the DMA pools for the CRQB/CRPB queues
 * and SG tables.
 * @hpriv: host private data; pool pointers are stored here
 * @dev: device the pools are tied to
 *
 * Pools are created with the device-managed dmam_pool_create(), so they
 * are released automatically by devres on driver detach; on partial
 * failure we can therefore simply return -ENOMEM without cleanup.
 */
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
2778
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002779/**
2780 * mv_platform_probe - handle a positive probe of an soc Marvell
2781 * host
2782 * @pdev: platform device found
2783 *
2784 * LOCKING:
2785 * Inherited from caller.
2786 */
2787static int mv_platform_probe(struct platform_device *pdev)
2788{
2789 static int printed_version;
2790 const struct mv_sata_platform_data *mv_platform_data;
2791 const struct ata_port_info *ppi[] =
2792 { &mv_port_info[chip_soc], NULL };
2793 struct ata_host *host;
2794 struct mv_host_priv *hpriv;
2795 struct resource *res;
2796 int n_ports, rc;
2797
2798 if (!printed_version++)
2799 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2800
2801 /*
2802 * Simple resource validation ..
2803 */
2804 if (unlikely(pdev->num_resources != 2)) {
2805 dev_err(&pdev->dev, "invalid number of resources\n");
2806 return -EINVAL;
2807 }
2808
2809 /*
2810 * Get the register base first
2811 */
2812 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2813 if (res == NULL)
2814 return -EINVAL;
2815
2816 /* allocate host */
2817 mv_platform_data = pdev->dev.platform_data;
2818 n_ports = mv_platform_data->n_ports;
2819
2820 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2821 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2822
2823 if (!host || !hpriv)
2824 return -ENOMEM;
2825 host->private_data = hpriv;
2826 hpriv->n_ports = n_ports;
2827
2828 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002829 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2830 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002831 hpriv->base -= MV_SATAHC0_REG_BASE;
2832
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002833 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2834 if (rc)
2835 return rc;
2836
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002837 /* initialize adapter */
2838 rc = mv_init_host(host, chip_soc);
2839 if (rc)
2840 return rc;
2841
2842 dev_printk(KERN_INFO, &pdev->dev,
2843 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2844 host->n_ports);
2845
2846 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2847 IRQF_SHARED, &mv6_sht);
2848}
2849
2850/*
2851 *
2852 * mv_platform_remove - unplug a platform interface
2853 * @pdev: platform device
2854 *
2855 * A platform bus SATA device has been unplugged. Perform the needed
2856 * cleanup. Also called on module unload for any active devices.
2857 */
2858static int __devexit mv_platform_remove(struct platform_device *pdev)
2859{
2860 struct device *dev = &pdev->dev;
2861 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002862
2863 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002864 return 0;
2865}
2866
/* Platform-bus (SoC) glue; the device is matched by name (DRV_NAME). */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
2875
2876
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002877#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002878static int mv_pci_init_one(struct pci_dev *pdev,
2879 const struct pci_device_id *ent);
2880
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002881
2882static struct pci_driver mv_pci_driver = {
2883 .name = DRV_NAME,
2884 .id_table = mv_pci_tbl,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002885 .probe = mv_pci_init_one,
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002886 .remove = ata_pci_remove_one,
2887};
2888
2889/*
2890 * module options
2891 */
2892static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2893
2894
2895/* move to PCI layer or libata core? */
2896static int pci_go_64(struct pci_dev *pdev)
2897{
2898 int rc;
2899
2900 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2901 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2902 if (rc) {
2903 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2904 if (rc) {
2905 dev_printk(KERN_ERR, &pdev->dev,
2906 "64-bit DMA enable failed\n");
2907 return rc;
2908 }
2909 }
2910 } else {
2911 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2912 if (rc) {
2913 dev_printk(KERN_ERR, &pdev->dev,
2914 "32-bit DMA enable failed\n");
2915 return rc;
2916 }
2917 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2918 if (rc) {
2919 dev_printk(KERN_ERR, &pdev->dev,
2920 "32-bit consistent DMA enable failed\n");
2921 return rc;
2922 }
2923 }
2924
2925 return rc;
2926}
2927
Brett Russ05b308e2005-10-05 17:08:53 -04002928/**
2929 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002930 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002931 *
2932 * FIXME: complete this.
2933 *
2934 * LOCKING:
2935 * Inherited from caller.
2936 */
Tejun Heo4447d352007-04-17 23:44:08 +09002937static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002938{
Tejun Heo4447d352007-04-17 23:44:08 +09002939 struct pci_dev *pdev = to_pci_dev(host->dev);
2940 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002941 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002942 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002943
2944 /* Use this to determine the HW stepping of the chip so we know
2945 * what errata to workaround
2946 */
Brett Russ31961942005-09-30 01:36:00 -04002947 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2948 if (scc == 0)
2949 scc_s = "SCSI";
2950 else if (scc == 0x01)
2951 scc_s = "RAID";
2952 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002953 scc_s = "?";
2954
2955 if (IS_GEN_I(hpriv))
2956 gen = "I";
2957 else if (IS_GEN_II(hpriv))
2958 gen = "II";
2959 else if (IS_GEN_IIE(hpriv))
2960 gen = "IIE";
2961 else
2962 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002963
Jeff Garzika9524a72005-10-30 14:39:11 -05002964 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002965 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2966 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002967 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2968}
2969
Brett Russ05b308e2005-10-05 17:08:53 -04002970/**
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002971 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
Brett Russ05b308e2005-10-05 17:08:53 -04002972 * @pdev: PCI device found
2973 * @ent: PCI device ID entry for the matched host
2974 *
2975 * LOCKING:
2976 * Inherited from caller.
2977 */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002978static int mv_pci_init_one(struct pci_dev *pdev,
2979 const struct pci_device_id *ent)
Brett Russ20f733e2005-09-01 18:26:17 -04002980{
Jeff Garzik2dcb4072007-10-19 06:42:56 -04002981 static int printed_version;
Brett Russ20f733e2005-09-01 18:26:17 -04002982 unsigned int board_idx = (unsigned int)ent->driver_data;
Tejun Heo4447d352007-04-17 23:44:08 +09002983 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2984 struct ata_host *host;
2985 struct mv_host_priv *hpriv;
2986 int n_ports, rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002987
Jeff Garzika9524a72005-10-30 14:39:11 -05002988 if (!printed_version++)
2989 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
Brett Russ20f733e2005-09-01 18:26:17 -04002990
Tejun Heo4447d352007-04-17 23:44:08 +09002991 /* allocate host */
2992 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2993
2994 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2995 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2996 if (!host || !hpriv)
2997 return -ENOMEM;
2998 host->private_data = hpriv;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002999 hpriv->n_ports = n_ports;
Tejun Heo4447d352007-04-17 23:44:08 +09003000
3001 /* acquire resources */
Tejun Heo24dc5f32007-01-20 16:00:28 +09003002 rc = pcim_enable_device(pdev);
3003 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04003004 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003005
Tejun Heo0d5ff562007-02-01 15:06:36 +09003006 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3007 if (rc == -EBUSY)
Tejun Heo24dc5f32007-01-20 16:00:28 +09003008 pcim_pin_device(pdev);
Tejun Heo0d5ff562007-02-01 15:06:36 +09003009 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09003010 return rc;
Tejun Heo4447d352007-04-17 23:44:08 +09003011 host->iomap = pcim_iomap_table(pdev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003012 hpriv->base = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04003013
Jeff Garzikd88184f2007-02-26 01:26:06 -05003014 rc = pci_go_64(pdev);
3015 if (rc)
3016 return rc;
3017
Mark Lordda2fa9b2008-01-26 18:32:45 -05003018 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3019 if (rc)
3020 return rc;
3021
Brett Russ20f733e2005-09-01 18:26:17 -04003022 /* initialize adapter */
Tejun Heo4447d352007-04-17 23:44:08 +09003023 rc = mv_init_host(host, board_idx);
Tejun Heo24dc5f32007-01-20 16:00:28 +09003024 if (rc)
3025 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003026
Brett Russ31961942005-09-30 01:36:00 -04003027 /* Enable interrupts */
Tejun Heo6a59dcf2007-02-24 15:12:31 +09003028 if (msi && pci_enable_msi(pdev))
Brett Russ31961942005-09-30 01:36:00 -04003029 pci_intx(pdev, 1);
Brett Russ20f733e2005-09-01 18:26:17 -04003030
Brett Russ31961942005-09-30 01:36:00 -04003031 mv_dump_pci_cfg(pdev, 0x68);
Tejun Heo4447d352007-04-17 23:44:08 +09003032 mv_print_info(host);
Brett Russ20f733e2005-09-01 18:26:17 -04003033
Tejun Heo4447d352007-04-17 23:44:08 +09003034 pci_set_master(pdev);
Jeff Garzikea8b4db2007-07-17 02:21:50 -04003035 pci_try_set_mwi(pdev);
Tejun Heo4447d352007-04-17 23:44:08 +09003036 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
Jeff Garzikc5d3e452007-07-11 18:30:50 -04003037 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
Brett Russ20f733e2005-09-01 18:26:17 -04003038}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003039#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003040
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003041static int mv_platform_probe(struct platform_device *pdev);
3042static int __devexit mv_platform_remove(struct platform_device *pdev);
3043
Brett Russ20f733e2005-09-01 18:26:17 -04003044static int __init mv_init(void)
3045{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003046 int rc = -ENODEV;
3047#ifdef CONFIG_PCI
3048 rc = pci_register_driver(&mv_pci_driver);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003049 if (rc < 0)
3050 return rc;
3051#endif
3052 rc = platform_driver_register(&mv_platform_driver);
3053
3054#ifdef CONFIG_PCI
3055 if (rc < 0)
3056 pci_unregister_driver(&mv_pci_driver);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003057#endif
3058 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003059}
3060
/* Module unload: unregister both bus drivers (reverse of mv_init()). */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3068
/* Module metadata, device tables, parameters and entry points. */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);	/* autoload on platform-device match */

#ifdef CONFIG_PCI
/* msi is only meaningful for PCI hosts; read-only at runtime (0444) */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);