blob: 83584b6e1ba5bb9114196d86a06e42ba0b64f995 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
Mark Lord1fd2e1c2008-01-26 18:33:59 -050032 2) Improve/fix IRQ and error handling sequences.
33
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
35
36 4) Think about TCQ support here, and for libata in general
37 with controllers that suppport it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
Jeff Garzik4a05e202007-05-24 23:40:15 -040039
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
41
42 6) Add port multiplier support (intermediate)
43
Jeff Garzik4a05e202007-05-24 23:40:15 -040044 8) Develop a low-power-consumption strategy, and implement it.
45
46 9) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
48 like that.
49
50 10) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is quite often not
53 worth the latency cost.
54
55 11) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
58
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
61
Jeff Garzik4a05e202007-05-24 23:40:15 -040062*/
63
64
Brett Russ20f733e2005-09-01 18:26:17 -040065#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080072#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050074#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050075#include <linux/platform_device.h>
76#include <linux/ata_platform.h>
Lennert Buytenhek15a32632008-03-27 14:51:39 -040077#include <linux/mbus.h>
Brett Russ20f733e2005-09-01 18:26:17 -040078#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050079#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040080#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040081#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040082
83#define DRV_NAME "sata_mv"
Mark Lord1fd2e1c2008-01-26 18:33:59 -050084#define DRV_VERSION "1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040085
86enum {
87 /* BAR's are enumerated in terms of pci_resource_start() terms */
88 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
89 MV_IO_BAR = 2, /* offset 0x18: IO space */
90 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
91
92 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
93 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
94
95 MV_PCI_REG_BASE = 0,
96 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040097 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
98 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
99 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
100 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
101 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
102
Brett Russ20f733e2005-09-01 18:26:17 -0400103 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500104 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500105 MV_GPIO_PORT_CTL = 0x104f0,
106 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400107
108 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
110 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
111 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
112
Brett Russ31961942005-09-30 01:36:00 -0400113 MV_MAX_Q_DEPTH = 32,
114 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
115
116 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
117 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400118 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
119 */
120 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
121 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500122 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400123 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400124
Brett Russ20f733e2005-09-01 18:26:17 -0400125 MV_PORTS_PER_HC = 4,
126 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
127 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400128 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400129 MV_PORT_MASK = 3,
130
131 /* Host Flags */
132 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
133 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100134 /* SoC integrated controllers, no PCI interface */
135 MV_FLAG_SOC = (1 << 28),
136
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400137 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400138 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
139 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500140 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400141
Brett Russ31961942005-09-30 01:36:00 -0400142 CRQB_FLAG_READ = (1 << 0),
143 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400144 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
145 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400146 CRQB_CMD_ADDR_SHIFT = 8,
147 CRQB_CMD_CS = (0x2 << 11),
148 CRQB_CMD_LAST = (1 << 15),
149
150 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400151 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
152 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400153
154 EPRD_FLAG_END_OF_TBL = (1 << 31),
155
Brett Russ20f733e2005-09-01 18:26:17 -0400156 /* PCI interface registers */
157
Brett Russ31961942005-09-30 01:36:00 -0400158 PCI_COMMAND_OFS = 0xc00,
159
Brett Russ20f733e2005-09-01 18:26:17 -0400160 PCI_MAIN_CMD_STS_OFS = 0xd30,
161 STOP_PCI_MASTER = (1 << 2),
162 PCI_MASTER_EMPTY = (1 << 3),
163 GLOB_SFT_RST = (1 << 4),
164
Jeff Garzik522479f2005-11-12 22:14:02 -0500165 MV_PCI_MODE = 0xd00,
166 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
167 MV_PCI_DISC_TIMER = 0xd04,
168 MV_PCI_MSI_TRIGGER = 0xc38,
169 MV_PCI_SERR_MASK = 0xc28,
170 MV_PCI_XBAR_TMOUT = 0x1d04,
171 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
172 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
173 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
174 MV_PCI_ERR_COMMAND = 0x1d50,
175
Mark Lord02a121d2007-12-01 13:07:22 -0500176 PCI_IRQ_CAUSE_OFS = 0x1d58,
177 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400178 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
179
Mark Lord02a121d2007-12-01 13:07:22 -0500180 PCIE_IRQ_CAUSE_OFS = 0x1900,
181 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500182 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500183
Brett Russ20f733e2005-09-01 18:26:17 -0400184 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
185 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500186 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
187 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
Brett Russ20f733e2005-09-01 18:26:17 -0400188 PORT0_ERR = (1 << 0), /* shift by port # */
189 PORT0_DONE = (1 << 1), /* shift by port # */
190 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
191 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
192 PCI_ERR = (1 << 18),
193 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
194 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500195 PORTS_0_3_COAL_DONE = (1 << 8),
196 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400197 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
198 GPIO_INT = (1 << 22),
199 SELF_INT = (1 << 23),
200 TWSI_INT = (1 << 24),
201 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500202 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500203 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500204 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400205 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
206 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500207 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
208 HC_MAIN_RSVD_5),
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500209 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
Brett Russ20f733e2005-09-01 18:26:17 -0400210
211 /* SATAHC registers */
212 HC_CFG_OFS = 0,
213
214 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400215 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400216 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
217 DEV_IRQ = (1 << 8), /* shift by port # */
218
219 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400220 SHD_BLK_OFS = 0x100,
221 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400222
223 /* SATA registers */
224 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
225 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500226 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500227 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500228 PHY_MODE4 = 0x314,
229 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500230 MV5_PHY_MODE = 0x74,
231 MV5_LT_MODE = 0x30,
232 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500233 SATA_INTERFACE_CTL = 0x050,
234
235 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400236
237 /* Port registers */
238 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500239 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
240 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
241 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
242 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
243 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400244
245 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
246 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400247 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
248 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
249 EDMA_ERR_DEV = (1 << 2), /* device error */
250 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
251 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
252 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400253 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
254 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400255 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400256 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400257 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
258 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
259 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
260 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500261
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400262 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500263 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
264 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
265 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
266 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
267
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400268 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500269
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400270 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500271 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
272 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
273 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
274 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
275 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
276
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400277 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500278
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400279 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400280 EDMA_ERR_OVERRUN_5 = (1 << 5),
281 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500282
283 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
284 EDMA_ERR_LNK_CTRL_RX_1 |
285 EDMA_ERR_LNK_CTRL_RX_3 |
286 EDMA_ERR_LNK_CTRL_TX,
287
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400288 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
289 EDMA_ERR_PRD_PAR |
290 EDMA_ERR_DEV_DCON |
291 EDMA_ERR_DEV_CON |
292 EDMA_ERR_SERR |
293 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400294 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400295 EDMA_ERR_CRPB_PAR |
296 EDMA_ERR_INTRL_PAR |
297 EDMA_ERR_IORDY |
298 EDMA_ERR_LNK_CTRL_RX_2 |
299 EDMA_ERR_LNK_DATA_RX |
300 EDMA_ERR_LNK_DATA_TX |
301 EDMA_ERR_TRANS_PROTO,
302 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
303 EDMA_ERR_PRD_PAR |
304 EDMA_ERR_DEV_DCON |
305 EDMA_ERR_DEV_CON |
306 EDMA_ERR_OVERRUN_5 |
307 EDMA_ERR_UNDERRUN_5 |
308 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400309 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400310 EDMA_ERR_CRPB_PAR |
311 EDMA_ERR_INTRL_PAR |
312 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400313
Brett Russ31961942005-09-30 01:36:00 -0400314 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
315 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400316
317 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
318 EDMA_REQ_Q_PTR_SHIFT = 5,
319
320 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
321 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
322 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400323 EDMA_RSP_Q_PTR_SHIFT = 3,
324
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400325 EDMA_CMD_OFS = 0x28, /* EDMA command register */
326 EDMA_EN = (1 << 0), /* enable EDMA */
327 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
328 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400329
Jeff Garzikc9d39132005-11-13 17:47:51 -0500330 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500331 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500332
Brett Russ31961942005-09-30 01:36:00 -0400333 /* Host private flags (hp_flags) */
334 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500335 MV_HP_ERRATA_50XXB0 = (1 << 1),
336 MV_HP_ERRATA_50XXB2 = (1 << 2),
337 MV_HP_ERRATA_60X1B2 = (1 << 3),
338 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500339 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400340 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
341 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
342 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500343 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400344
Brett Russ31961942005-09-30 01:36:00 -0400345 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400346 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500347 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400348 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400349};
350
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400351#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
352#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500353#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100354#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500355
Lennert Buytenhek15a32632008-03-27 14:51:39 -0400356#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
357#define WINDOW_BASE(i) (0x20034 + ((i) << 4))
358
Jeff Garzik095fec82005-11-12 09:50:49 -0500359enum {
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400360 /* DMA boundary 0xffff is required by the s/g splitting
361 * we need on /length/ in mv_fill-sg().
362 */
363 MV_DMA_BOUNDARY = 0xffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500364
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400365 /* mask of register bits containing lower 32 bits
366 * of EDMA request queue DMA address
367 */
Jeff Garzik095fec82005-11-12 09:50:49 -0500368 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
369
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400370 /* ditto, for response queue */
Jeff Garzik095fec82005-11-12 09:50:49 -0500371 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
372};
373
Jeff Garzik522479f2005-11-12 22:14:02 -0500374enum chip_type {
375 chip_504x,
376 chip_508x,
377 chip_5080,
378 chip_604x,
379 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500380 chip_6042,
381 chip_7042,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500382 chip_soc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500383};
384
Brett Russ31961942005-09-30 01:36:00 -0400385/* Command ReQuest Block: 32B */
386struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400387 __le32 sg_addr;
388 __le32 sg_addr_hi;
389 __le16 ctrl_flags;
390 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400391};
392
Jeff Garzike4e7b892006-01-31 12:18:41 -0500393struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400394 __le32 addr;
395 __le32 addr_hi;
396 __le32 flags;
397 __le32 len;
398 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500399};
400
Brett Russ31961942005-09-30 01:36:00 -0400401/* Command ResPonse Block: 8B */
402struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400403 __le16 id;
404 __le16 flags;
405 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400406};
407
408/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
409struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400410 __le32 addr;
411 __le32 flags_size;
412 __le32 addr_hi;
413 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400414};
415
416struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400417 struct mv_crqb *crqb;
418 dma_addr_t crqb_dma;
419 struct mv_crpb *crpb;
420 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500421 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
422 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400423
424 unsigned int req_idx;
425 unsigned int resp_idx;
426
Brett Russ31961942005-09-30 01:36:00 -0400427 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400428};
429
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500430struct mv_port_signal {
431 u32 amps;
432 u32 pre;
433};
434
Mark Lord02a121d2007-12-01 13:07:22 -0500435struct mv_host_priv {
436 u32 hp_flags;
437 struct mv_port_signal signal[8];
438 const struct mv_hw_ops *ops;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500439 int n_ports;
440 void __iomem *base;
441 void __iomem *main_cause_reg_addr;
442 void __iomem *main_mask_reg_addr;
Mark Lord02a121d2007-12-01 13:07:22 -0500443 u32 irq_cause_ofs;
444 u32 irq_mask_ofs;
445 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500446 /*
447 * These consistent DMA memory pools give us guaranteed
448 * alignment for hardware-accessed data structures,
449 * and less memory waste in accomplishing the alignment.
450 */
451 struct dma_pool *crqb_pool;
452 struct dma_pool *crpb_pool;
453 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500454};
455
Jeff Garzik47c2b672005-11-12 21:13:17 -0500456struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500457 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
458 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500459 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
460 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
461 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500462 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
463 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500464 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100465 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500466};
467
Brett Russ20f733e2005-09-01 18:26:17 -0400468static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900469static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
470static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
471static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
472static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400473static int mv_port_start(struct ata_port *ap);
474static void mv_port_stop(struct ata_port *ap);
475static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500476static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900477static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400478static void mv_error_handler(struct ata_port *ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400479static void mv_eh_freeze(struct ata_port *ap);
480static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500481static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400482
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500483static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
484 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500485static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
486static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
487 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500488static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
489 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500490static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100491static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500492
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500493static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
494 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500495static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
496static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
497 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500498static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
499 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500500static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500501static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
502 void __iomem *mmio);
503static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
504 void __iomem *mmio);
505static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
506 void __iomem *mmio, unsigned int n_hc);
507static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
508 void __iomem *mmio);
509static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100510static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500511static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
512 unsigned int port_no);
Mark Lord72109162008-01-26 18:31:33 -0500513static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
514 void __iomem *port_mmio, int want_ncq);
515static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500516
Mark Lordeb73d552008-01-29 13:24:00 -0500517/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
518 * because we have to allow room for worst case splitting of
519 * PRDs for 64K boundaries in mv_fill_sg().
520 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400521static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400522 .module = THIS_MODULE,
523 .name = DRV_NAME,
524 .ioctl = ata_scsi_ioctl,
525 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400526 .can_queue = ATA_DEF_QUEUE,
527 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400528 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400529 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
530 .emulated = ATA_SHT_EMULATED,
531 .use_clustering = 1,
532 .proc_name = DRV_NAME,
533 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400534 .slave_configure = ata_scsi_slave_config,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400535 .slave_destroy = ata_scsi_slave_destroy,
536 .bios_param = ata_std_bios_param,
537};
538
539static struct scsi_host_template mv6_sht = {
540 .module = THIS_MODULE,
541 .name = DRV_NAME,
542 .ioctl = ata_scsi_ioctl,
543 .queuecommand = ata_scsi_queuecmd,
Mark Lord138bfdd2008-01-26 18:33:18 -0500544 .change_queue_depth = ata_scsi_change_queue_depth,
545 .can_queue = MV_MAX_Q_DEPTH - 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400546 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400547 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400548 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
549 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500550 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400551 .proc_name = DRV_NAME,
552 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400553 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900554 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400555 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400556};
557
Jeff Garzikc9d39132005-11-13 17:47:51 -0500558static const struct ata_port_operations mv5_ops = {
Jeff Garzikc9d39132005-11-13 17:47:51 -0500559 .tf_load = ata_tf_load,
560 .tf_read = ata_tf_read,
561 .check_status = ata_check_status,
562 .exec_command = ata_exec_command,
563 .dev_select = ata_std_dev_select,
564
Jeff Garzikcffacd82007-03-09 09:46:47 -0500565 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500566
567 .qc_prep = mv_qc_prep,
568 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900569 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500570
Jeff Garzikc9d39132005-11-13 17:47:51 -0500571 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900572 .irq_on = ata_irq_on,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500573
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400574 .error_handler = mv_error_handler,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400575 .freeze = mv_eh_freeze,
576 .thaw = mv_eh_thaw,
577
Jeff Garzikc9d39132005-11-13 17:47:51 -0500578 .scr_read = mv5_scr_read,
579 .scr_write = mv5_scr_write,
580
581 .port_start = mv_port_start,
582 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500583};
584
585static const struct ata_port_operations mv6_ops = {
Mark Lordf2738272008-01-26 18:32:29 -0500586 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400587 .tf_load = ata_tf_load,
588 .tf_read = ata_tf_read,
589 .check_status = ata_check_status,
590 .exec_command = ata_exec_command,
591 .dev_select = ata_std_dev_select,
592
Jeff Garzikcffacd82007-03-09 09:46:47 -0500593 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400594
Brett Russ31961942005-09-30 01:36:00 -0400595 .qc_prep = mv_qc_prep,
596 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900597 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400598
Brett Russ20f733e2005-09-01 18:26:17 -0400599 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900600 .irq_on = ata_irq_on,
Brett Russ20f733e2005-09-01 18:26:17 -0400601
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400602 .error_handler = mv_error_handler,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400603 .freeze = mv_eh_freeze,
604 .thaw = mv_eh_thaw,
Mark Lord138bfdd2008-01-26 18:33:18 -0500605 .qc_defer = ata_std_qc_defer,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400606
Brett Russ20f733e2005-09-01 18:26:17 -0400607 .scr_read = mv_scr_read,
608 .scr_write = mv_scr_write,
609
Brett Russ31961942005-09-30 01:36:00 -0400610 .port_start = mv_port_start,
611 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400612};
613
/*
 * Port operations for Gen-IIE chips (6042/7042/SoC).  Identical to the
 * Gen-II hook set except for .qc_prep, which must build the IIE-format
 * command request block (see mv_qc_prep_iie).
 */
static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
641
/*
 * Per-chip-type port configuration, indexed by the chip_* board index
 * stored in the PCI table below.  NCQ is only advertised on the Gen-II
 * and Gen-IIE PCI parts; all chips support PIO0-4 and UDMA6.
 */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
696
/*
 * PCI IDs handled by this driver.  driver_data is an index into
 * mv_port_info[] (the chip_* enum).
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
726
/* Low-level hardware hooks for Gen-I (50xx) chips. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
735
/* Low-level hardware hooks for Gen-II/IIE (60xx/7042) PCI chips. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
744
/*
 * Low-level hardware hooks for the system-on-chip (non-PCI) variant;
 * shares the Gen-II PHY errata handler but has its own reset paths.
 */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
753
Brett Russ20f733e2005-09-01 18:26:17 -0400754/*
755 * Functions
756 */
757
758static inline void writelfl(unsigned long data, void __iomem *addr)
759{
760 writel(data, addr);
761 (void) readl(addr); /* flush to avoid PCI posted write */
762}
763
Brett Russ20f733e2005-09-01 18:26:17 -0400764static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
765{
766 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
767}
768
Jeff Garzikc9d39132005-11-13 17:47:51 -0500769static inline unsigned int mv_hc_from_port(unsigned int port)
770{
771 return port >> MV_PORT_HC_SHIFT;
772}
773
774static inline unsigned int mv_hardport_from_port(unsigned int port)
775{
776 return port & MV_PORT_MASK;
777}
778
779static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
780 unsigned int port)
781{
782 return mv_hc_base(base, mv_hc_from_port(port));
783}
784
Brett Russ20f733e2005-09-01 18:26:17 -0400785static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
786{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500787 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500788 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500789 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400790}
791
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500792static inline void __iomem *mv_host_base(struct ata_host *host)
793{
794 struct mv_host_priv *hpriv = host->private_data;
795 return hpriv->base;
796}
797
Brett Russ20f733e2005-09-01 18:26:17 -0400798static inline void __iomem *mv_ap_base(struct ata_port *ap)
799{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500800 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400801}
802
Jeff Garzikcca39742006-08-24 03:19:22 -0400803static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400804{
Jeff Garzikcca39742006-08-24 03:19:22 -0400805 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400806}
807
/*
 * .irq_clear hook: intentionally a no-op.  Interrupt cause registers
 * are cleared where they are handled elsewhere in this driver (e.g.
 * mv_start_dma clears HC_IRQ_CAUSE_OFS before enabling EDMA).
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
811
/**
 * mv_set_edma_ptrs - program EDMA queue base/pointer registers
 * @port_mmio: per-port register base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding queue DMA addresses and sw indices
 *
 * Loads the hardware request (CRQB) and response (CRPB) queue base
 * addresses and in/out pointers from the cached software state in @pp.
 * On chips with the XX42A0 erratum, the "other side" pointer register
 * must be written with the low 32 bits of the queue base as well.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* request queue base must be 1KB aligned */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response queue base must be 256B aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
851
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: per-port register base
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * If the EDMA engine is already running in the wrong mode (NCQ vs
 * non-NCQ), it is stopped first.  When (re)starting, pending EDMA
 * event/interrupt indicators are cleared, the engine is reconfigured
 * for the requested mode, queue pointers are reloaded, and EDMA is
 * enabled.  Verifies the local cache of the eDMA state is accurate
 * with a WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* mode change requires a full stop/reconfigure cycle */
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
904
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Requests EDMA disable if the engine is running (the disable bit
 * auto-clears), then polls up to ~100ms for the enable bit to drop.
 * Verifies the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller (caller must hold the host lock; see
 * mv_stop_dma for the locked wrapper).
 *
 * RETURNS:
 * 0 on success, -EIO if the engine refuses to stop.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* sw says disabled -- hw had better agree */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
947
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400948static int mv_stop_dma(struct ata_port *ap)
949{
950 unsigned long flags;
951 int rc;
952
953 spin_lock_irqsave(&ap->host->lock, flags);
954 rc = __mv_stop_dma(ap);
955 spin_unlock_irqrestore(&ap->host->lock, flags);
956
957 return rc;
958}
959
Jeff Garzik8a70f8d2005-10-05 17:19:47 -0400960#ifdef ATA_DEBUG
Brett Russ31961942005-09-30 01:36:00 -0400961static void mv_dump_mem(void __iomem *start, unsigned bytes)
962{
Brett Russ31961942005-09-30 01:36:00 -0400963 int b, w;
964 for (b = 0; b < bytes; ) {
965 DPRINTK("%p: ", start + b);
966 for (w = 0; b < bytes && w < 4; w++) {
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400967 printk("%08x ", readl(start + b));
Brett Russ31961942005-09-30 01:36:00 -0400968 b += sizeof(u32);
969 }
970 printk("\n");
971 }
Brett Russ31961942005-09-30 01:36:00 -0400972}
Jeff Garzik8a70f8d2005-10-05 17:19:47 -0400973#endif
974
/* Debug helper: hex-dump @bytes of PCI config space, four dwords per line. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%02x: ", off);
		for (col = 0; off < bytes && col < 4; col++) {
			u32 dw;

			(void) pci_read_config_dword(pdev, off, &dw);
			printk("%08x ", dw);
			off += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * Debug helper: dump PCI config space plus chip-global, host-controller
 * and per-port register blocks.  Pass a negative @port to dump every
 * HC/port; otherwise only the HC/port owning @port is dumped.  The
 * magic offsets/lengths below select the chip-global, HC, EDMA and
 * SATA-interface register windows.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
1035
Brett Russ20f733e2005-09-01 18:26:17 -04001036static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1037{
1038 unsigned int ofs;
1039
1040 switch (sc_reg_in) {
1041 case SCR_STATUS:
1042 case SCR_CONTROL:
1043 case SCR_ERROR:
1044 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1045 break;
1046 case SCR_ACTIVE:
1047 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1048 break;
1049 default:
1050 ofs = 0xffffffffU;
1051 break;
1052 }
1053 return ofs;
1054}
1055
Tejun Heoda3dbb12007-07-16 14:29:40 +09001056static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001057{
1058 unsigned int ofs = mv_scr_offset(sc_reg_in);
1059
Tejun Heoda3dbb12007-07-16 14:29:40 +09001060 if (ofs != 0xffffffffU) {
1061 *val = readl(mv_ap_base(ap) + ofs);
1062 return 0;
1063 } else
1064 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001065}
1066
Tejun Heoda3dbb12007-07-16 14:29:40 +09001067static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001068{
1069 unsigned int ofs = mv_scr_offset(sc_reg_in);
1070
Tejun Heoda3dbb12007-07-16 14:29:40 +09001071 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001072 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001073 return 0;
1074 } else
1075 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001076}
1077
Mark Lordf2738272008-01-26 18:32:29 -05001078static void mv6_dev_config(struct ata_device *adev)
1079{
1080 /*
1081 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1082 * See mv_qc_prep() for more info.
1083 */
1084 if (adev->flags & ATA_DFLAG_NCQ)
1085 if (adev->max_sectors > ATA_MAX_SECTORS)
1086 adev->max_sectors = ATA_MAX_SECTORS;
1087}
1088
/**
 * mv_edma_cfg - program the EDMA configuration register
 * @pp: port private data (NCQ-enabled flag is cached here)
 * @hpriv: host private data (for chip-generation checks)
 * @port_mmio: per-port register base
 * @want_ncq: nonzero to configure the engine for NCQ operation
 *
 * Builds the per-generation EDMA_CFG value, records whether NCQ mode
 * was selected in @pp->pp_flags, and writes the register.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1118
/*
 * mv_port_free_dma_mem - release per-port DMA pool allocations
 * @ap: port whose CRQB/CRPB/sg tables should be freed
 *
 * Frees the command request/response queues and the per-tag scatter
 * tables allocated by mv_port_start().  Safe to call on a partially
 * initialized port (each pointer is checked and NULLed).
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 * On GEN_I, tags 1..N alias sg_tbl[0], so only tag 0 is actually
	 * freed there -- the aliased slots are merely NULLed, which
	 * avoids a double free.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
1147
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.  The port private struct itself is devm-managed;
 * the queue/sg-table memory comes from the host's DMA pools and is
 * released via mv_port_free_dma_mem() on failure or port stop.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 0 on success, -ENOMEM on allocation failure.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* GEN_I: all tags share table 0 */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1215
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.  DMA must be stopped before the
 * queue/sg memory it references is returned to the pools.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}
1230
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  An ePRD entry can
 * describe at most 64KB and must not cross a 64KB boundary, so each
 * DMA segment is split as needed (a length field of 0 encodes 64KB).
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* each NCQ tag has its own sg table */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* split segments that would cross a 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1274
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001275static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001276{
Mark Lord559eeda2006-05-19 16:40:15 -04001277 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001278 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001279 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001280}
1281
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ protocols go through the EDMA request queue */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* 64-bit DMA address of this tag's sg table, split into two regs */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1371
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* non-EDMA protocols are issued via the shadow registers instead */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	/* host-queue tag mirrors the NCQ tag */
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* the Gen IIE CRQB reuses the Gen I/II queue slot memory but has
	 * a different layout, hence the cast
	 */
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* Gen IIE packs the whole taskfile into four 32-bit words rather
	 * than the Gen I/II register/value pair encoding
	 */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1440
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	/* (re)start EDMA in the mode required by this command's protocol */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* advance the software request-queue producer index */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1482
Brett Russ05b308e2005-10-05 17:08:53 -04001483/**
Brett Russ05b308e2005-10-05 17:08:53 -04001484 * mv_err_intr - Handle error interrupts on the port
1485 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001486 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001487 *
1488 * In most cases, just clear the interrupt and move on. However,
1489 * some cases require an eDMA reset, which is done right before
1490 * the COMRESET in mv_phy_reset(). The SERR case requires a
1491 * clear of pending errors in the SATA SERROR register. Finally,
1492 * if the port disabled DMA, update our cached copy to match.
1493 *
1494 * LOCKING:
1495 * Inherited from caller.
1496 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001497static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001498{
Brett Russ31961942005-09-30 01:36:00 -04001499 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001500 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1501 struct mv_port_priv *pp = ap->private_data;
1502 struct mv_host_priv *hpriv = ap->host->private_data;
1503 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1504 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001505 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001506
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001507 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001508
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001509 if (!edma_enabled) {
1510 /* just a guess: do we need to do this? should we
1511 * expand this, and do it in all cases?
1512 */
Tejun Heo936fd732007-08-06 18:36:23 +09001513 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1514 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001515 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001516
1517 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1518
1519 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1520
1521 /*
1522 * all generations share these EDMA error cause bits
1523 */
1524
1525 if (edma_err_cause & EDMA_ERR_DEV)
1526 err_mask |= AC_ERR_DEV;
1527 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001528 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001529 EDMA_ERR_INTRL_PAR)) {
1530 err_mask |= AC_ERR_ATA_BUS;
1531 action |= ATA_EH_HARDRESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001532 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001533 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001534 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1535 ata_ehi_hotplugged(ehi);
1536 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001537 "dev disconnect" : "dev connect");
Mark Lord3606a382008-01-26 18:28:23 -05001538 action |= ATA_EH_HARDRESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001539 }
1540
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001541 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001542 eh_freeze_mask = EDMA_EH_FREEZE_5;
1543
1544 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001545 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001546 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001547 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001548 }
1549 } else {
1550 eh_freeze_mask = EDMA_EH_FREEZE;
1551
1552 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001553 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001554 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001555 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001556 }
1557
1558 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001559 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1560 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001561 err_mask = AC_ERR_ATA_BUS;
1562 action |= ATA_EH_HARDRESET;
1563 }
1564 }
Brett Russ20f733e2005-09-01 18:26:17 -04001565
1566 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001567 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001568
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001569 if (!err_mask) {
1570 err_mask = AC_ERR_OTHER;
1571 action |= ATA_EH_HARDRESET;
1572 }
1573
1574 ehi->serror |= serr;
1575 ehi->action |= action;
1576
1577 if (qc)
1578 qc->err_mask |= err_mask;
1579 else
1580 ehi->err_mask |= err_mask;
1581
1582 if (edma_err_cause & eh_freeze_mask)
1583 ata_port_freeze(ap);
1584 else
1585 ata_port_abort(ap);
1586}
1587
1588static void mv_intr_pio(struct ata_port *ap)
1589{
1590 struct ata_queued_cmd *qc;
1591 u8 ata_status;
1592
1593 /* ignore spurious intr if drive still BUSY */
1594 ata_status = readb(ap->ioaddr.status_addr);
1595 if (unlikely(ata_status & ATA_BUSY))
1596 return;
1597
1598 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001599 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001600 if (unlikely(!qc)) /* no active tag */
1601 return;
1602 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1603 return;
1604
1605 /* and finally, complete the ATA command */
1606 qc->err_mask |= ac_err_mask(ata_status);
1607 ata_qc_complete(qc);
1608}
1609
/* Drain the hardware response queue while EDMA is active, completing each
 * finished command, then publish the new software out-pointer back to the
 * hardware.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* hand off to error handling; the out-pointer is NOT
			 * advanced for this entry
			 */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* single flushed write tells the hardware how far we consumed */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1675
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* non-PCI (SOC) hosts may expose fewer ports than MV_PORTS_PER_HC */
	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* ack (write-to-clear) everything we are about to service */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		/* error bits live in the main cause register, passed in */
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			/* polled commands are not completed from here */
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		/* dispatch completions by mode: EDMA response queue vs PIO */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1758
/* Handle a PCI bus error: log and dump controller state, ack the cause
 * register, then freeze every online port so libata EH can recover.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	/* cause register offset is chip-specific, cached in hpriv */
	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* ack (clear) the latched PCI error cause bits */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* record the raw cause only on the first port */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1798
/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* a PCI error short-circuits all further HC servicing */
	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* service each host controller with pending bits in the main cause */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1853
Jeff Garzikc9d39132005-11-13 17:47:51 -05001854static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1855{
1856 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1857 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1858
1859 return hc_mmio + ofs;
1860}
1861
1862static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1863{
1864 unsigned int ofs;
1865
1866 switch (sc_reg_in) {
1867 case SCR_STATUS:
1868 case SCR_ERROR:
1869 case SCR_CONTROL:
1870 ofs = sc_reg_in * sizeof(u32);
1871 break;
1872 default:
1873 ofs = 0xffffffffU;
1874 break;
1875 }
1876 return ofs;
1877}
1878
Tejun Heoda3dbb12007-07-16 14:29:40 +09001879static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001880{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001881 struct mv_host_priv *hpriv = ap->host->private_data;
1882 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001883 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001884 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1885
Tejun Heoda3dbb12007-07-16 14:29:40 +09001886 if (ofs != 0xffffffffU) {
1887 *val = readl(addr + ofs);
1888 return 0;
1889 } else
1890 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001891}
1892
Tejun Heoda3dbb12007-07-16 14:29:40 +09001893static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001894{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001895 struct mv_host_priv *hpriv = ap->host->private_data;
1896 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001897 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001898 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1899
Tejun Heoda3dbb12007-07-16 14:29:40 +09001900 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001901 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001902 return 0;
1903 } else
1904 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001905}
1906
/* Gen I (5xxx) bus reset.  Early 5080 parts (revision 0) skip the
 * expansion-ROM BAR control tweak; all others set bit 0 of that register
 * before performing the shared PCI bus reset.
 */
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv5_reset_pci_bus(host, mmio);
}
1922
/* Reset the flash-control register to its Gen I default value.
 * (0x0fcfffff -- magic value from the vendor driver; presumably the
 * documented reset state of MV_FLASH_CTL.  TODO: confirm against the
 * Marvell datasheet.)
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1927
Jeff Garzik47c2b672005-11-12 21:13:17 -05001928static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001929 void __iomem *mmio)
1930{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001931 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1932 u32 tmp;
1933
1934 tmp = readl(phy_mmio + MV5_PHY_MODE);
1935
1936 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1937 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001938}
1939
/* Configure Gen I GPIO/ROM-BAR registers for LED operation. */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0, which
	 * looks suspicious -- "&= ~(1 << 0)" (clear bit 0) may have been
	 * intended.  Left as-is; verify against Marvell documentation
	 * before changing, as this has shipped for a long time.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1952
/* Apply Gen I PHY errata workarounds for one port and restore the signal
 * settings previously captured by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* pre-emphasis (bits 12:11) and amplitude (bits 7:5) positions */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 parts: adjust LT mode and PHY control first */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* re-apply the saved pre/amps values into the masked bit positions */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1978
Jeff Garzikc9d39132005-11-13 17:47:51 -05001979
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Quiesce and reinitialize one Gen I port: disable EDMA, reset the
 * channel, then zero/reprogram the per-port EDMA registers.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* disable EDMA with a flushed write before resetting the channel */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
2006
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one Gen I host controller block: clear several HC registers and
 * read-modify-write the register at offset 0x20.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* keep only the 0x1c1c1c1c bits, then set 0x03030303
	 * (magic values from the vendor driver)
	 */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
2025
2026static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2027 unsigned int n_hc)
2028{
2029 unsigned int hc, port;
2030
2031 for (hc = 0; hc < n_hc; hc++) {
2032 for (port = 0; port < MV_PORTS_PER_HC; port++)
2033 mv5_reset_hc_port(hpriv, mmio,
2034 (hc * MV_PORTS_PER_HC) + port);
2035
2036 mv5_reset_one_hc(hpriv, mmio, hc);
2037 }
2038
2039 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002040}
2041
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Reset the PCI-side registers: adjust the PCI mode register, then clear
 * timers, interrupt masks and all error-latch registers.
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	/* clear bits 23:16 of the PCI mode register */
	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	/* chip-specific PCI irq cause/mask offsets are cached in hpriv */
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
2066
/*
 * mv6_reset_flash - flash-interface reset for 60xx parts
 * @hpriv: host private data
 * @mmio: base address of the HBA
 *
 * Performs the common 50xx flash reset, then updates the GPIO port
 * control register: keeps only bits 1:0 and sets bits 5 and 6
 * (magic values; meaning not visible here — TODO confirm vs datasheet).
 *
 * LOCKING:
 *	Inherited from caller.
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2078
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @hpriv: host private data (unused here; kept for ops-table symmetry)
 * @mmio: base address of the HBA
 * @n_hc: number of host controllers (unused here)
 *
 * This routine only applies to 6xxx parts.  Returns 0 on success,
 * 1 if the PCI master would not flush or the global soft reset
 * could not be set/cleared.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset (up to 6 attempts before giving up) */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2142
/*
 * mv6_read_preamp - record per-port PHY amplitude/pre-emphasis (60xx)
 * @hpriv: host private data; signal[idx] fields are written
 * @idx: port index
 * @mmio: base address of the HBA
 *
 * If bit 0 of MV_RESET_CFG is clear, fixed default values are stored;
 * otherwise the current PHY_MODE2 settings for the port are captured
 * so mv6_phy_errata() can restore them after a reset.
 *
 * LOCKING:
 *	Inherited from caller.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		/* use fixed defaults when the strap bit is clear */
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
2162
/*
 * mv6_enable_leds - enable board LEDs on 60xx parts
 *
 * Writes the magic value 0x60 (bits 5 and 6) to the GPIO port control
 * register; per the function name this presumably drives the LED
 * outputs — TODO confirm against the datasheet.
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2167
/*
 * mv6_phy_errata - apply PHY errata workarounds (60xx / IIE parts)
 * @hpriv: host private data (hp_flags select which errata apply)
 * @mmio: base address of the HBA
 * @port: port number to fix up
 *
 * Sequence and delays are errata-mandated; do not reorder.
 * PHY mode 2 is toggled, mode 3 gets magic bits, mode 4 is adjusted
 * (with a 60X1B2-specific save/restore of register 0x310), and finally
 * the amplitude/pre-emphasis values captured by mv6_read_preamp()
 * are written back.
 *
 * LOCKING:
 *	Inherited from caller.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then clear both;
		 * 200us settle time after each write */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2: register 0x310 is clobbered by the PHY_MODE4
		 * write below, so save it here and restore afterwards
		 * (presumably per errata — TODO confirm) */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		/* clear bit 1, set bit 0 */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2233
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002234/* TODO: use the generic LED interface to configure the SATA Presence */
2235/* & Acitivy LEDs on the board */
2236static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2237 void __iomem *mmio)
2238{
2239 return;
2240}
2241
2242static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2243 void __iomem *mmio)
2244{
2245 void __iomem *port_mmio;
2246 u32 tmp;
2247
2248 port_mmio = mv_port_base(mmio, idx);
2249 tmp = readl(port_mmio + PHY_MODE2);
2250
2251 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2252 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2253}
2254
2255#undef ZERO
2256#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * mv_soc_reset_hc_port - reset one port on an SoC host
 * @hpriv: host private data
 * @mmio: base address of the HBA
 * @port: port number to reset
 *
 * Disables EDMA, pulses the channel reset, then zeroes the port's
 * EDMA bookkeeping registers (ZERO() writes 0 at port_mmio + offset)
 * and programs default EDMA config and IORDY timeout values.
 * Register order follows the hardware bring-up sequence; do not reorder.
 *
 * LOCKING:
 *	Inherited from caller.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
2280
2281#undef ZERO
2282
2283#define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * mv_soc_reset_one_hc - reset the (single) SoC host controller
 * @hpriv: host private data (unused here; kept for ops-table symmetry)
 * @mmio: base address of the HBA
 *
 * Zeroes three per-HC registers of HC 0 via the ZERO() macro
 * (writes 0 at hc_mmio + offset).
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}
2294
2295#undef ZERO
2296
2297static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2298 void __iomem *mmio, unsigned int n_hc)
2299{
2300 unsigned int port;
2301
2302 for (port = 0; port < hpriv->n_ports; port++)
2303 mv_soc_reset_hc_port(hpriv, mmio, port);
2304
2305 mv_soc_reset_one_hc(hpriv, mmio);
2306
2307 return 0;
2308}
2309
2310static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2311 void __iomem *mmio)
2312{
2313 return;
2314}
2315
2316static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2317{
2318 return;
2319}
2320
/*
 * mv_channel_reset - pulse ATA_RST on one channel's EDMA engine
 * @hpriv: host private data (selects generation-specific behavior)
 * @mmio: base address of the HBA
 * @port_no: port number to reset
 *
 * Asserts ATA_RST, applies the Gen-II interface-control tweak, waits
 * 25us for the reset to propagate, deasserts, then runs the chip's
 * phy_errata hook.  Gen-I parts get an extra 1ms settle delay.
 * Timing and ordering are hardware-mandated; do not reorder.
 *
 * LOCKING:
 *	Inherited from caller.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit. Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2347
/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * @class: out-param; receives the detected device class
 * @deadline: jiffies value bounding the polling loops
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller. This is coded to safe to call at
 * interrupt level, i.e. it does not sleep.
 *
 * NOTE(review): the body now calls msleep(), which does sleep —
 * the locking comment above appears stale; confirm callers run
 * in sleepable context.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	/* DET=1: assert COMRESET, then DET=0: release it */
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll SStatus DET until link established (3) or absent (0),
	 * bounded by @deadline */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* clear any latched EDMA error interrupt causes */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* EDMA must be disabled across a PHY reset */
	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2451
Tejun Heocc0680a2007-08-06 18:36:23 +09002452static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002453{
Tejun Heocc0680a2007-08-06 18:36:23 +09002454 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002455 struct mv_port_priv *pp = ap->private_data;
Tejun Heocc0680a2007-08-06 18:36:23 +09002456 struct ata_eh_context *ehc = &link->eh_context;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002457 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002458
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002459 rc = mv_stop_dma(ap);
2460 if (rc)
2461 ehc->i.action |= ATA_EH_HARDRESET;
2462
2463 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2464 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2465 ehc->i.action |= ATA_EH_HARDRESET;
2466 }
2467
2468 /* if we're about to do hardreset, nothing more to do */
2469 if (ehc->i.action & ATA_EH_HARDRESET)
2470 return 0;
2471
Tejun Heocc0680a2007-08-06 18:36:23 +09002472 if (ata_link_online(link))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002473 rc = ata_wait_ready(ap, deadline);
2474 else
2475 rc = -ENODEV;
2476
2477 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002478}
2479
/*
 * mv_hardreset - libata EH hardreset hook
 * @link: ATA link to reset
 * @class: out-param; receives detected device class (via mv_phy_reset)
 * @deadline: jiffies bound passed through to mv_phy_reset
 *
 * Stops EDMA, pulses the channel reset, then performs COMRESET and
 * device classification.  Always returns 0; mv_stop_dma()'s result
 * is deliberately ignored since the channel reset follows anyway.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
2495
/*
 * mv_postreset - libata EH postreset hook
 * @link: ATA link that was reset
 * @classes: device classes detected by the reset
 *
 * Prints link status, clears SError (by writing back the value read —
 * SError bits are write-1-to-clear), and, when a device is present,
 * restores the device control register.
 */
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
2517
/*
 * mv_error_handler - libata error-handler entry point
 *
 * Runs standard EH with this driver's prereset/hardreset/postreset
 * hooks and the generic softreset.
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2523
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002524static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002525{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002526 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002527 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2528 u32 tmp, mask;
2529 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002530
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002531 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002532
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002533 shift = ap->port_no * 2;
2534 if (hc > 0)
2535 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002536
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002537 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002538
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002539 /* disable assertion of portN err, done events */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002540 tmp = readl(hpriv->main_mask_reg_addr);
2541 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002542}
2543
/*
 * mv_eh_thaw - libata EH thaw hook
 * @ap: port to thaw
 *
 * Clears any latched EDMA error and per-port HC interrupt causes,
 * then re-enables this port's err/done bits (a 2-bit field per port)
 * in the main interrupt mask register.  Causes must be cleared
 * before unmasking, or stale events would fire immediately.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* ports on the second HC use bit positions one higher and are
	 * numbered 0-3 within that HC */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2577
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: taskfile shadow registers live in a
	 * contiguous block, one u32 slot per ATA_REG_* index
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions.
	 * Writing SError's value back to itself clears its latched bits
	 * (write-1-to-clear semantics, presumably — TODO confirm).
	 */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2626
/*
 * mv_chip_id - identify the chip variant and record its quirks
 * @host: ATA host being set up
 * @board_idx: index into the board table (chip_* enum)
 *
 * Selects the per-generation ops table, records generation and errata
 * flags in hpriv->hp_flags based on the PCI revision ID, and picks
 * the PCI vs PCIe interrupt cause/mask register offsets.
 *
 * NOTE(review): to_pci_dev(host->dev) is computed even for chip_soc,
 * where the device is presumably not a PCI device; pdev is not
 * dereferenced on that path, so this looks benign — confirm.
 *
 * Returns 0 on success, 1 on an unrecognized board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata. This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: 7042 shares the 6042 setup below */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts keep their irq cause/mask registers at different
	 * offsets than plain-PCI parts */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2766
Brett Russ05b308e2005-10-05 17:08:53 -04002767/**
Jeff Garzik47c2b672005-11-12 21:13:17 -05002768 * mv_init_host - Perform some early initialization of the host.
Tejun Heo4447d352007-04-17 23:44:08 +09002769 * @host: ATA host to initialize
2770 * @board_idx: controller index
Brett Russ05b308e2005-10-05 17:08:53 -04002771 *
2772 * If possible, do an early global reset of the host. Then do
2773 * our port init and clear/unmask all/relevant host interrupts.
2774 *
2775 * LOCKING:
2776 * Inherited from caller.
2777 */
Tejun Heo4447d352007-04-17 23:44:08 +09002778static int mv_init_host(struct ata_host *host, unsigned int board_idx)
Brett Russ20f733e2005-09-01 18:26:17 -04002779{
2780 int rc = 0, n_hc, port, hc;
Tejun Heo4447d352007-04-17 23:44:08 +09002781 struct mv_host_priv *hpriv = host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002782 void __iomem *mmio = hpriv->base;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002783
Tejun Heo4447d352007-04-17 23:44:08 +09002784 rc = mv_chip_id(host, board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002785 if (rc)
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002786 goto done;
2787
2788 if (HAS_PCI(host)) {
2789 hpriv->main_cause_reg_addr = hpriv->base +
2790 HC_MAIN_IRQ_CAUSE_OFS;
2791 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2792 } else {
2793 hpriv->main_cause_reg_addr = hpriv->base +
2794 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2795 hpriv->main_mask_reg_addr = hpriv->base +
2796 HC_SOC_MAIN_IRQ_MASK_OFS;
2797 }
2798 /* global interrupt mask */
2799 writel(0, hpriv->main_mask_reg_addr);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002800
Tejun Heo4447d352007-04-17 23:44:08 +09002801 n_hc = mv_get_hc_count(host->ports[0]->flags);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002802
Tejun Heo4447d352007-04-17 23:44:08 +09002803 for (port = 0; port < host->n_ports; port++)
Jeff Garzik47c2b672005-11-12 21:13:17 -05002804 hpriv->ops->read_preamp(hpriv, port, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04002805
Jeff Garzikc9d39132005-11-13 17:47:51 -05002806 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002807 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04002808 goto done;
Brett Russ20f733e2005-09-01 18:26:17 -04002809
Jeff Garzik522479f2005-11-12 22:14:02 -05002810 hpriv->ops->reset_flash(hpriv, mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002811 hpriv->ops->reset_bus(host, mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002812 hpriv->ops->enable_leds(hpriv, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04002813
Tejun Heo4447d352007-04-17 23:44:08 +09002814 for (port = 0; port < host->n_ports; port++) {
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002815 if (IS_GEN_II(hpriv)) {
Jeff Garzikc9d39132005-11-13 17:47:51 -05002816 void __iomem *port_mmio = mv_port_base(mmio, port);
2817
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002818 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
Mark Lordeb46d682006-05-19 16:29:21 -04002819 ifctl |= (1 << 7); /* enable gen2i speed */
2820 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002821 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2822 }
2823
Jeff Garzikc9d39132005-11-13 17:47:51 -05002824 hpriv->ops->phy_errata(hpriv, mmio, port);
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002825 }
2826
Tejun Heo4447d352007-04-17 23:44:08 +09002827 for (port = 0; port < host->n_ports; port++) {
Tejun Heocbcdd872007-08-18 13:14:55 +09002828 struct ata_port *ap = host->ports[port];
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002829 void __iomem *port_mmio = mv_port_base(mmio, port);
Tejun Heocbcdd872007-08-18 13:14:55 +09002830
2831 mv_port_init(&ap->ioaddr, port_mmio);
2832
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002833#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002834 if (HAS_PCI(host)) {
2835 unsigned int offset = port_mmio - mmio;
2836 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2837 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2838 }
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002839#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002840 }
2841
2842 for (hc = 0; hc < n_hc; hc++) {
Brett Russ31961942005-09-30 01:36:00 -04002843 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2844
2845 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2846 "(before clear)=0x%08x\n", hc,
2847 readl(hc_mmio + HC_CFG_OFS),
2848 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2849
2850 /* Clear any currently outstanding hc interrupt conditions */
2851 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002852 }
2853
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002854 if (HAS_PCI(host)) {
2855 /* Clear any currently outstanding host interrupt conditions */
2856 writelfl(0, mmio + hpriv->irq_cause_ofs);
Brett Russ31961942005-09-30 01:36:00 -04002857
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002858 /* and unmask interrupt generation for host regs */
2859 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2860 if (IS_GEN_I(hpriv))
2861 writelfl(~HC_MAIN_MASKED_IRQS_5,
2862 hpriv->main_mask_reg_addr);
2863 else
2864 writelfl(~HC_MAIN_MASKED_IRQS,
2865 hpriv->main_mask_reg_addr);
Jeff Garzikfb621e22007-02-25 04:19:45 -05002866
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002867 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2868 "PCI int cause/mask=0x%08x/0x%08x\n",
2869 readl(hpriv->main_cause_reg_addr),
2870 readl(hpriv->main_mask_reg_addr),
2871 readl(mmio + hpriv->irq_cause_ofs),
2872 readl(mmio + hpriv->irq_mask_ofs));
2873 } else {
2874 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2875 hpriv->main_mask_reg_addr);
2876 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2877 readl(hpriv->main_cause_reg_addr),
2878 readl(hpriv->main_mask_reg_addr));
2879 }
Brett Russ31961942005-09-30 01:36:00 -04002880done:
Brett Russ20f733e2005-09-01 18:26:17 -04002881 return rc;
2882}
2883
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002884static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2885{
2886 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2887 MV_CRQB_Q_SZ, 0);
2888 if (!hpriv->crqb_pool)
2889 return -ENOMEM;
2890
2891 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2892 MV_CRPB_Q_SZ, 0);
2893 if (!hpriv->crpb_pool)
2894 return -ENOMEM;
2895
2896 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2897 MV_SG_TBL_SZ, 0);
2898 if (!hpriv->sg_tbl_pool)
2899 return -ENOMEM;
2900
2901 return 0;
2902}
2903
Lennert Buytenhek15a32632008-03-27 14:51:39 -04002904static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
2905 struct mbus_dram_target_info *dram)
2906{
2907 int i;
2908
2909 for (i = 0; i < 4; i++) {
2910 writel(0, hpriv->base + WINDOW_CTRL(i));
2911 writel(0, hpriv->base + WINDOW_BASE(i));
2912 }
2913
2914 for (i = 0; i < dram->num_cs; i++) {
2915 struct mbus_dram_window *cs = dram->cs + i;
2916
2917 writel(((cs->size - 1) & 0xffff0000) |
2918 (cs->mbus_attr << 8) |
2919 (dram->mbus_dram_target_id << 4) | 1,
2920 hpriv->base + WINDOW_CTRL(i));
2921 writel(cs->base, hpriv->base + WINDOW_BASE(i));
2922 }
2923}
2924
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002925/**
2926 * mv_platform_probe - handle a positive probe of an soc Marvell
2927 * host
2928 * @pdev: platform device found
2929 *
2930 * LOCKING:
2931 * Inherited from caller.
2932 */
2933static int mv_platform_probe(struct platform_device *pdev)
2934{
2935 static int printed_version;
2936 const struct mv_sata_platform_data *mv_platform_data;
2937 const struct ata_port_info *ppi[] =
2938 { &mv_port_info[chip_soc], NULL };
2939 struct ata_host *host;
2940 struct mv_host_priv *hpriv;
2941 struct resource *res;
2942 int n_ports, rc;
2943
2944 if (!printed_version++)
2945 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2946
2947 /*
2948 * Simple resource validation ..
2949 */
2950 if (unlikely(pdev->num_resources != 2)) {
2951 dev_err(&pdev->dev, "invalid number of resources\n");
2952 return -EINVAL;
2953 }
2954
2955 /*
2956 * Get the register base first
2957 */
2958 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2959 if (res == NULL)
2960 return -EINVAL;
2961
2962 /* allocate host */
2963 mv_platform_data = pdev->dev.platform_data;
2964 n_ports = mv_platform_data->n_ports;
2965
2966 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2967 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2968
2969 if (!host || !hpriv)
2970 return -ENOMEM;
2971 host->private_data = hpriv;
2972 hpriv->n_ports = n_ports;
2973
2974 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002975 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2976 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002977 hpriv->base -= MV_SATAHC0_REG_BASE;
2978
Lennert Buytenhek15a32632008-03-27 14:51:39 -04002979 /*
2980 * (Re-)program MBUS remapping windows if we are asked to.
2981 */
2982 if (mv_platform_data->dram != NULL)
2983 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
2984
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002985 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2986 if (rc)
2987 return rc;
2988
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002989 /* initialize adapter */
2990 rc = mv_init_host(host, chip_soc);
2991 if (rc)
2992 return rc;
2993
2994 dev_printk(KERN_INFO, &pdev->dev,
2995 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2996 host->n_ports);
2997
2998 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2999 IRQF_SHARED, &mv6_sht);
3000}
3001
3002/*
3003 *
3004 * mv_platform_remove - unplug a platform interface
3005 * @pdev: platform device
3006 *
3007 * A platform bus SATA device has been unplugged. Perform the needed
3008 * cleanup. Also called on module unload for any active devices.
3009 */
3010static int __devexit mv_platform_remove(struct platform_device *pdev)
3011{
3012 struct device *dev = &pdev->dev;
3013 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003014
3015 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003016 return 0;
3017}
3018
/* Platform-bus glue for SoC-integrated (non-PCI) Marvell SATA cores. */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
3027
3028
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003029#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003030static int mv_pci_init_one(struct pci_dev *pdev,
3031 const struct pci_device_id *ent);
3032
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003033
3034static struct pci_driver mv_pci_driver = {
3035 .name = DRV_NAME,
3036 .id_table = mv_pci_tbl,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003037 .probe = mv_pci_init_one,
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003038 .remove = ata_pci_remove_one,
3039};
3040
/*
 * module options
 */
/* Exposed via module_param() near the bottom of this file (PCI only). */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
3045
3046
3047/* move to PCI layer or libata core? */
3048static int pci_go_64(struct pci_dev *pdev)
3049{
3050 int rc;
3051
3052 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3053 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3054 if (rc) {
3055 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3056 if (rc) {
3057 dev_printk(KERN_ERR, &pdev->dev,
3058 "64-bit DMA enable failed\n");
3059 return rc;
3060 }
3061 }
3062 } else {
3063 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3064 if (rc) {
3065 dev_printk(KERN_ERR, &pdev->dev,
3066 "32-bit DMA enable failed\n");
3067 return rc;
3068 }
3069 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3070 if (rc) {
3071 dev_printk(KERN_ERR, &pdev->dev,
3072 "32-bit consistent DMA enable failed\n");
3073 return rc;
3074 }
3075 }
3076
3077 return rc;
3078}
3079
Brett Russ05b308e2005-10-05 17:08:53 -04003080/**
3081 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09003082 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04003083 *
3084 * FIXME: complete this.
3085 *
3086 * LOCKING:
3087 * Inherited from caller.
3088 */
Tejun Heo4447d352007-04-17 23:44:08 +09003089static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04003090{
Tejun Heo4447d352007-04-17 23:44:08 +09003091 struct pci_dev *pdev = to_pci_dev(host->dev);
3092 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07003093 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003094 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04003095
3096 /* Use this to determine the HW stepping of the chip so we know
3097 * what errata to workaround
3098 */
Brett Russ31961942005-09-30 01:36:00 -04003099 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3100 if (scc == 0)
3101 scc_s = "SCSI";
3102 else if (scc == 0x01)
3103 scc_s = "RAID";
3104 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003105 scc_s = "?";
3106
3107 if (IS_GEN_I(hpriv))
3108 gen = "I";
3109 else if (IS_GEN_II(hpriv))
3110 gen = "II";
3111 else if (IS_GEN_IIE(hpriv))
3112 gen = "IIE";
3113 else
3114 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04003115
Jeff Garzika9524a72005-10-30 14:39:11 -05003116 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003117 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3118 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04003119 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3120}
3121
Brett Russ05b308e2005-10-05 17:08:53 -04003122/**
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003123 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
Brett Russ05b308e2005-10-05 17:08:53 -04003124 * @pdev: PCI device found
3125 * @ent: PCI device ID entry for the matched host
3126 *
3127 * LOCKING:
3128 * Inherited from caller.
3129 */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003130static int mv_pci_init_one(struct pci_dev *pdev,
3131 const struct pci_device_id *ent)
Brett Russ20f733e2005-09-01 18:26:17 -04003132{
Jeff Garzik2dcb4072007-10-19 06:42:56 -04003133 static int printed_version;
Brett Russ20f733e2005-09-01 18:26:17 -04003134 unsigned int board_idx = (unsigned int)ent->driver_data;
Tejun Heo4447d352007-04-17 23:44:08 +09003135 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3136 struct ata_host *host;
3137 struct mv_host_priv *hpriv;
3138 int n_ports, rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003139
Jeff Garzika9524a72005-10-30 14:39:11 -05003140 if (!printed_version++)
3141 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
Brett Russ20f733e2005-09-01 18:26:17 -04003142
Tejun Heo4447d352007-04-17 23:44:08 +09003143 /* allocate host */
3144 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3145
3146 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3147 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3148 if (!host || !hpriv)
3149 return -ENOMEM;
3150 host->private_data = hpriv;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003151 hpriv->n_ports = n_ports;
Tejun Heo4447d352007-04-17 23:44:08 +09003152
3153 /* acquire resources */
Tejun Heo24dc5f32007-01-20 16:00:28 +09003154 rc = pcim_enable_device(pdev);
3155 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04003156 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003157
Tejun Heo0d5ff562007-02-01 15:06:36 +09003158 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3159 if (rc == -EBUSY)
Tejun Heo24dc5f32007-01-20 16:00:28 +09003160 pcim_pin_device(pdev);
Tejun Heo0d5ff562007-02-01 15:06:36 +09003161 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09003162 return rc;
Tejun Heo4447d352007-04-17 23:44:08 +09003163 host->iomap = pcim_iomap_table(pdev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003164 hpriv->base = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04003165
Jeff Garzikd88184f2007-02-26 01:26:06 -05003166 rc = pci_go_64(pdev);
3167 if (rc)
3168 return rc;
3169
Mark Lordda2fa9b2008-01-26 18:32:45 -05003170 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3171 if (rc)
3172 return rc;
3173
Brett Russ20f733e2005-09-01 18:26:17 -04003174 /* initialize adapter */
Tejun Heo4447d352007-04-17 23:44:08 +09003175 rc = mv_init_host(host, board_idx);
Tejun Heo24dc5f32007-01-20 16:00:28 +09003176 if (rc)
3177 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003178
Brett Russ31961942005-09-30 01:36:00 -04003179 /* Enable interrupts */
Tejun Heo6a59dcf2007-02-24 15:12:31 +09003180 if (msi && pci_enable_msi(pdev))
Brett Russ31961942005-09-30 01:36:00 -04003181 pci_intx(pdev, 1);
Brett Russ20f733e2005-09-01 18:26:17 -04003182
Brett Russ31961942005-09-30 01:36:00 -04003183 mv_dump_pci_cfg(pdev, 0x68);
Tejun Heo4447d352007-04-17 23:44:08 +09003184 mv_print_info(host);
Brett Russ20f733e2005-09-01 18:26:17 -04003185
Tejun Heo4447d352007-04-17 23:44:08 +09003186 pci_set_master(pdev);
Jeff Garzikea8b4db2007-07-17 02:21:50 -04003187 pci_try_set_mwi(pdev);
Tejun Heo4447d352007-04-17 23:44:08 +09003188 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
Jeff Garzikc5d3e452007-07-11 18:30:50 -04003189 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
Brett Russ20f733e2005-09-01 18:26:17 -04003190}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003191#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003192
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003193static int mv_platform_probe(struct platform_device *pdev);
3194static int __devexit mv_platform_remove(struct platform_device *pdev);
3195
Brett Russ20f733e2005-09-01 18:26:17 -04003196static int __init mv_init(void)
3197{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003198 int rc = -ENODEV;
3199#ifdef CONFIG_PCI
3200 rc = pci_register_driver(&mv_pci_driver);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003201 if (rc < 0)
3202 return rc;
3203#endif
3204 rc = platform_driver_register(&mv_platform_driver);
3205
3206#ifdef CONFIG_PCI
3207 if (rc < 0)
3208 pci_unregister_driver(&mv_pci_driver);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003209#endif
3210 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003211}
3212
/* Module exit: unregister both bus drivers in reverse of mv_init(). */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3220
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:sata_mv");

#ifdef CONFIG_PCI
/* "msi" is PCI-specific, so only expose the parameter for PCI builds. */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);