blob: d52ce1188327bf2a9f89b21e9335c0442fb5f748 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Mark Lorde12bef52008-03-31 19:33:56 -04004 * Copyright 2008: Marvell Corporation, all rights reserved.
Jeff Garzik8b260242005-11-12 12:32:50 -05005 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05006 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04007 *
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
Jeff Garzik4a05e202007-05-24 23:40:15 -040025/*
26 sata_mv TODO list:
27
28 1) Needs a full errata audit for all chipsets. I implemented most
29 of the errata workarounds found in the Marvell vendor driver, but
30 I distinctly remember a couple workarounds (one related to PCI-X)
31 are still needed.
32
Mark Lord1fd2e1c2008-01-26 18:33:59 -050033 2) Improve/fix IRQ and error handling sequences.
34
35 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36
37 4) Think about TCQ support here, and for libata in general
38 with controllers that suppport it via host-queuing hardware
39 (a software-only implementation could be a nightmare).
Jeff Garzik4a05e202007-05-24 23:40:15 -040040
41 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42
Mark Lorde49856d2008-04-16 14:59:07 -040043 6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.
Jeff Garzik4a05e202007-05-24 23:40:15 -040044
Mark Lord40f0bc22008-04-16 14:57:25 -040045 7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
Jeff Garzik4a05e202007-05-24 23:40:15 -040046
Jeff Garzik4a05e202007-05-24 23:40:15 -040047 8) Develop a low-power-consumption strategy, and implement it.
48
49 9) [Experiment, low priority] See if ATAPI can be supported using
50 "unknown FIS" or "vendor-specific FIS" support, or something creative
51 like that.
52
53 10) [Experiment, low priority] Investigate interrupt coalescing.
54 Quite often, especially with PCI Message Signalled Interrupts (MSI),
55 the overhead reduced by interrupt mitigation is quite often not
56 worth the latency cost.
57
58 11) [Experiment, Marvell value added] Is it possible to use target
59 mode to cross-connect two Linux boxes with Marvell cards? If so,
60 creating LibATA target mode support would be very interesting.
61
62 Target mode, for those without docs, is the ability to directly
63 connect two SATA controllers.
64
Jeff Garzik4a05e202007-05-24 23:40:15 -040065*/
66
Brett Russ20f733e2005-09-01 18:26:17 -040067#include <linux/kernel.h>
68#include <linux/module.h>
69#include <linux/pci.h>
70#include <linux/init.h>
71#include <linux/blkdev.h>
72#include <linux/delay.h>
73#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080074#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040075#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050076#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050077#include <linux/platform_device.h>
78#include <linux/ata_platform.h>
Lennert Buytenhek15a32632008-03-27 14:51:39 -040079#include <linux/mbus.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050081#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040082#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040083#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040084
85#define DRV_NAME "sata_mv"
Mark Lord1fd2e1c2008-01-26 18:33:59 -050086#define DRV_VERSION "1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040087
88enum {
89 /* BAR's are enumerated in terms of pci_resource_start() terms */
90 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
91 MV_IO_BAR = 2, /* offset 0x18: IO space */
92 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
93
94 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
95 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
96
97 MV_PCI_REG_BASE = 0,
98 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040099 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
100 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
101 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
102 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
103 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
104
Brett Russ20f733e2005-09-01 18:26:17 -0400105 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500106 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500107 MV_GPIO_PORT_CTL = 0x104f0,
108 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400109
110 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
111 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
112 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
113 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
114
Brett Russ31961942005-09-30 01:36:00 -0400115 MV_MAX_Q_DEPTH = 32,
116 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
117
118 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
119 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400120 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
121 */
122 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
123 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500124 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400125 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400126
Brett Russ20f733e2005-09-01 18:26:17 -0400127 MV_PORTS_PER_HC = 4,
128 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
129 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400130 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400131 MV_PORT_MASK = 3,
132
133 /* Host Flags */
134 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
135 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100136 /* SoC integrated controllers, no PCI interface */
Mark Lorde12bef52008-03-31 19:33:56 -0400137 MV_FLAG_SOC = (1 << 28),
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100138
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400139 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400140 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
141 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500142 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400143
Brett Russ31961942005-09-30 01:36:00 -0400144 CRQB_FLAG_READ = (1 << 0),
145 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400146 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
Mark Lorde12bef52008-03-31 19:33:56 -0400147 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400148 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400149 CRQB_CMD_ADDR_SHIFT = 8,
150 CRQB_CMD_CS = (0x2 << 11),
151 CRQB_CMD_LAST = (1 << 15),
152
153 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400154 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
155 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400156
157 EPRD_FLAG_END_OF_TBL = (1 << 31),
158
Brett Russ20f733e2005-09-01 18:26:17 -0400159 /* PCI interface registers */
160
Brett Russ31961942005-09-30 01:36:00 -0400161 PCI_COMMAND_OFS = 0xc00,
162
Brett Russ20f733e2005-09-01 18:26:17 -0400163 PCI_MAIN_CMD_STS_OFS = 0xd30,
164 STOP_PCI_MASTER = (1 << 2),
165 PCI_MASTER_EMPTY = (1 << 3),
166 GLOB_SFT_RST = (1 << 4),
167
Jeff Garzik522479f2005-11-12 22:14:02 -0500168 MV_PCI_MODE = 0xd00,
169 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
170 MV_PCI_DISC_TIMER = 0xd04,
171 MV_PCI_MSI_TRIGGER = 0xc38,
172 MV_PCI_SERR_MASK = 0xc28,
173 MV_PCI_XBAR_TMOUT = 0x1d04,
174 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
175 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
176 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
177 MV_PCI_ERR_COMMAND = 0x1d50,
178
Mark Lord02a121d2007-12-01 13:07:22 -0500179 PCI_IRQ_CAUSE_OFS = 0x1d58,
180 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400181 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
182
Mark Lord02a121d2007-12-01 13:07:22 -0500183 PCIE_IRQ_CAUSE_OFS = 0x1900,
184 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500185 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500186
Brett Russ20f733e2005-09-01 18:26:17 -0400187 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
188 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500189 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
190 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
Brett Russ20f733e2005-09-01 18:26:17 -0400191 PORT0_ERR = (1 << 0), /* shift by port # */
192 PORT0_DONE = (1 << 1), /* shift by port # */
193 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
194 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
195 PCI_ERR = (1 << 18),
196 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
197 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500198 PORTS_0_3_COAL_DONE = (1 << 8),
199 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400200 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
201 GPIO_INT = (1 << 22),
202 SELF_INT = (1 << 23),
203 TWSI_INT = (1 << 24),
204 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500205 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Mark Lorde12bef52008-03-31 19:33:56 -0400206 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500207 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400208 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
209 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500210 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
211 HC_MAIN_RSVD_5),
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500212 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
Brett Russ20f733e2005-09-01 18:26:17 -0400213
214 /* SATAHC registers */
215 HC_CFG_OFS = 0,
216
217 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400218 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400219 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
220 DEV_IRQ = (1 << 8), /* shift by port # */
221
222 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400223 SHD_BLK_OFS = 0x100,
224 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400225
226 /* SATA registers */
227 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
228 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500229 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Mark Lord17c5aab2008-04-16 14:56:51 -0400230
Mark Lorde12bef52008-03-31 19:33:56 -0400231 LTMODE_OFS = 0x30c,
Mark Lord17c5aab2008-04-16 14:56:51 -0400232 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
233
Jeff Garzik47c2b672005-11-12 21:13:17 -0500234 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500235 PHY_MODE4 = 0x314,
236 PHY_MODE2 = 0x330,
Mark Lorde12bef52008-03-31 19:33:56 -0400237 SATA_IFCTL_OFS = 0x344,
238 SATA_IFSTAT_OFS = 0x34c,
239 VENDOR_UNIQUE_FIS_OFS = 0x35c,
Mark Lord17c5aab2008-04-16 14:56:51 -0400240
Mark Lorde12bef52008-03-31 19:33:56 -0400241 FIS_CFG_OFS = 0x360,
Mark Lord17c5aab2008-04-16 14:56:51 -0400242 FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
243
Jeff Garzikc9d39132005-11-13 17:47:51 -0500244 MV5_PHY_MODE = 0x74,
245 MV5_LT_MODE = 0x30,
246 MV5_PHY_CTL = 0x0C,
Mark Lorde12bef52008-03-31 19:33:56 -0400247 SATA_INTERFACE_CFG = 0x050,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500248
249 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400250
251 /* Port registers */
252 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500253 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
254 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
255 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
256 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
257 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Mark Lorde12bef52008-03-31 19:33:56 -0400258 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
259 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
Brett Russ20f733e2005-09-01 18:26:17 -0400260
261 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
262 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400263 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
264 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
265 EDMA_ERR_DEV = (1 << 2), /* device error */
266 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
267 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
268 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400269 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
270 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400271 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400272 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400273 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
274 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
275 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
276 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500277
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400278 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500279 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
280 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
281 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
282 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
283
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400284 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500285
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400286 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500287 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
288 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
289 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
290 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
291 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
292
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400293 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500294
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400295 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400296 EDMA_ERR_OVERRUN_5 = (1 << 5),
297 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500298
299 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
300 EDMA_ERR_LNK_CTRL_RX_1 |
301 EDMA_ERR_LNK_CTRL_RX_3 |
Mark Lord40f0bc22008-04-16 14:57:25 -0400302 EDMA_ERR_LNK_CTRL_TX |
303 /* temporary, until we fix hotplug: */
304 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
Mark Lord646a4da2008-01-26 18:30:37 -0500305
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400306 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
307 EDMA_ERR_PRD_PAR |
308 EDMA_ERR_DEV_DCON |
309 EDMA_ERR_DEV_CON |
310 EDMA_ERR_SERR |
311 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400312 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400313 EDMA_ERR_CRPB_PAR |
314 EDMA_ERR_INTRL_PAR |
315 EDMA_ERR_IORDY |
316 EDMA_ERR_LNK_CTRL_RX_2 |
317 EDMA_ERR_LNK_DATA_RX |
318 EDMA_ERR_LNK_DATA_TX |
319 EDMA_ERR_TRANS_PROTO,
Mark Lorde12bef52008-03-31 19:33:56 -0400320
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400321 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
322 EDMA_ERR_PRD_PAR |
323 EDMA_ERR_DEV_DCON |
324 EDMA_ERR_DEV_CON |
325 EDMA_ERR_OVERRUN_5 |
326 EDMA_ERR_UNDERRUN_5 |
327 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400328 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400329 EDMA_ERR_CRPB_PAR |
330 EDMA_ERR_INTRL_PAR |
331 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400332
Brett Russ31961942005-09-30 01:36:00 -0400333 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
334 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400335
336 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
337 EDMA_REQ_Q_PTR_SHIFT = 5,
338
339 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
340 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
341 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400342 EDMA_RSP_Q_PTR_SHIFT = 3,
343
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400344 EDMA_CMD_OFS = 0x28, /* EDMA command register */
345 EDMA_EN = (1 << 0), /* enable EDMA */
346 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
347 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400348
Jeff Garzikc9d39132005-11-13 17:47:51 -0500349 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500350 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500351
Brett Russ31961942005-09-30 01:36:00 -0400352 /* Host private flags (hp_flags) */
353 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500354 MV_HP_ERRATA_50XXB0 = (1 << 1),
355 MV_HP_ERRATA_50XXB2 = (1 << 2),
356 MV_HP_ERRATA_60X1B2 = (1 << 3),
357 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500358 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400359 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
360 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
361 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500362 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400363
Brett Russ31961942005-09-30 01:36:00 -0400364 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400365 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500366 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Brett Russ31961942005-09-30 01:36:00 -0400367};
368
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400369#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
370#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500371#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100372#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500373
Lennert Buytenhek15a32632008-03-27 14:51:39 -0400374#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
375#define WINDOW_BASE(i) (0x20034 + ((i) << 4))
376
Jeff Garzik095fec82005-11-12 09:50:49 -0500377enum {
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400378 /* DMA boundary 0xffff is required by the s/g splitting
379 * we need on /length/ in mv_fill-sg().
380 */
381 MV_DMA_BOUNDARY = 0xffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500382
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400383 /* mask of register bits containing lower 32 bits
384 * of EDMA request queue DMA address
385 */
Jeff Garzik095fec82005-11-12 09:50:49 -0500386 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
387
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400388 /* ditto, for response queue */
Jeff Garzik095fec82005-11-12 09:50:49 -0500389 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
390};
391
Jeff Garzik522479f2005-11-12 22:14:02 -0500392enum chip_type {
393 chip_504x,
394 chip_508x,
395 chip_5080,
396 chip_604x,
397 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500398 chip_6042,
399 chip_7042,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500400 chip_soc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500401};
402
Brett Russ31961942005-09-30 01:36:00 -0400403/* Command ReQuest Block: 32B */
404struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400405 __le32 sg_addr;
406 __le32 sg_addr_hi;
407 __le16 ctrl_flags;
408 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400409};
410
Jeff Garzike4e7b892006-01-31 12:18:41 -0500411struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400412 __le32 addr;
413 __le32 addr_hi;
414 __le32 flags;
415 __le32 len;
416 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500417};
418
Brett Russ31961942005-09-30 01:36:00 -0400419/* Command ResPonse Block: 8B */
420struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400421 __le16 id;
422 __le16 flags;
423 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400424};
425
426/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
427struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400428 __le32 addr;
429 __le32 flags_size;
430 __le32 addr_hi;
431 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400432};
433
434struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400435 struct mv_crqb *crqb;
436 dma_addr_t crqb_dma;
437 struct mv_crpb *crpb;
438 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500439 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
440 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400441
442 unsigned int req_idx;
443 unsigned int resp_idx;
444
Brett Russ31961942005-09-30 01:36:00 -0400445 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400446};
447
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500448struct mv_port_signal {
449 u32 amps;
450 u32 pre;
451};
452
Mark Lord02a121d2007-12-01 13:07:22 -0500453struct mv_host_priv {
454 u32 hp_flags;
455 struct mv_port_signal signal[8];
456 const struct mv_hw_ops *ops;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500457 int n_ports;
458 void __iomem *base;
459 void __iomem *main_cause_reg_addr;
460 void __iomem *main_mask_reg_addr;
Mark Lord02a121d2007-12-01 13:07:22 -0500461 u32 irq_cause_ofs;
462 u32 irq_mask_ofs;
463 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500464 /*
465 * These consistent DMA memory pools give us guaranteed
466 * alignment for hardware-accessed data structures,
467 * and less memory waste in accomplishing the alignment.
468 */
469 struct dma_pool *crqb_pool;
470 struct dma_pool *crpb_pool;
471 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500472};
473
Jeff Garzik47c2b672005-11-12 21:13:17 -0500474struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500475 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
476 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500477 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
478 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
479 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500480 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
481 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500482 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100483 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500484};
485
Tejun Heoda3dbb12007-07-16 14:29:40 +0900486static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
487static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
488static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
489static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400490static int mv_port_start(struct ata_port *ap);
491static void mv_port_stop(struct ata_port *ap);
492static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500493static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900494static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Tejun Heoa1efdab2008-03-25 12:22:50 +0900495static int mv_hardreset(struct ata_link *link, unsigned int *class,
496 unsigned long deadline);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400497static void mv_eh_freeze(struct ata_port *ap);
498static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500499static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400500
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500501static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
502 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500503static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
504static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
505 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500506static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
507 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500508static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100509static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500510
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500511static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
512 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500513static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
514static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
515 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500516static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
517 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500518static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500519static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
520 void __iomem *mmio);
521static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
522 void __iomem *mmio);
523static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
524 void __iomem *mmio, unsigned int n_hc);
525static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
526 void __iomem *mmio);
527static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100528static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
Mark Lorde12bef52008-03-31 19:33:56 -0400529static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500530 unsigned int port_no);
Mark Lorde12bef52008-03-31 19:33:56 -0400531static int mv_stop_edma(struct ata_port *ap);
Mark Lordb5624682008-03-31 19:34:40 -0400532static int mv_stop_edma_engine(void __iomem *port_mmio);
Mark Lorde12bef52008-03-31 19:33:56 -0400533static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500534
Mark Lorde49856d2008-04-16 14:59:07 -0400535static void mv_pmp_select(struct ata_port *ap, int pmp);
536static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
537 unsigned long deadline);
538static int mv_softreset(struct ata_link *link, unsigned int *class,
539 unsigned long deadline);
Brett Russ20f733e2005-09-01 18:26:17 -0400540
Mark Lordeb73d552008-01-29 13:24:00 -0500541/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
542 * because we have to allow room for worst case splitting of
543 * PRDs for 64K boundaries in mv_fill_sg().
544 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400545static struct scsi_host_template mv5_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900546 ATA_BASE_SHT(DRV_NAME),
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400547 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400548 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400549};
550
551static struct scsi_host_template mv6_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900552 ATA_NCQ_SHT(DRV_NAME),
Mark Lord138bfdd2008-01-26 18:33:18 -0500553 .can_queue = MV_MAX_Q_DEPTH - 1,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400554 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400555 .dma_boundary = MV_DMA_BOUNDARY,
Brett Russ20f733e2005-09-01 18:26:17 -0400556};
557
Tejun Heo029cfd62008-03-25 12:22:49 +0900558static struct ata_port_operations mv5_ops = {
559 .inherits = &ata_sff_port_ops,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500560
561 .qc_prep = mv_qc_prep,
562 .qc_issue = mv_qc_issue,
563
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400564 .freeze = mv_eh_freeze,
565 .thaw = mv_eh_thaw,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900566 .hardreset = mv_hardreset,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900567 .error_handler = ata_std_error_handler, /* avoid SFF EH */
Tejun Heo029cfd62008-03-25 12:22:49 +0900568 .post_internal_cmd = ATA_OP_NULL,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400569
Jeff Garzikc9d39132005-11-13 17:47:51 -0500570 .scr_read = mv5_scr_read,
571 .scr_write = mv5_scr_write,
572
573 .port_start = mv_port_start,
574 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500575};
576
Tejun Heo029cfd62008-03-25 12:22:49 +0900577static struct ata_port_operations mv6_ops = {
578 .inherits = &mv5_ops,
Mark Lorde49856d2008-04-16 14:59:07 -0400579 .qc_defer = sata_pmp_qc_defer_cmd_switch,
Mark Lordf2738272008-01-26 18:32:29 -0500580 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400581 .scr_read = mv_scr_read,
582 .scr_write = mv_scr_write,
583
Mark Lorde49856d2008-04-16 14:59:07 -0400584 .pmp_hardreset = mv_pmp_hardreset,
585 .pmp_softreset = mv_softreset,
586 .softreset = mv_softreset,
587 .error_handler = sata_pmp_error_handler,
Brett Russ20f733e2005-09-01 18:26:17 -0400588};
589
Tejun Heo029cfd62008-03-25 12:22:49 +0900590static struct ata_port_operations mv_iie_ops = {
591 .inherits = &mv6_ops,
Mark Lorde49856d2008-04-16 14:59:07 -0400592 .qc_defer = ata_std_qc_defer, /* FIS-based switching */
Tejun Heo029cfd62008-03-25 12:22:49 +0900593 .dev_config = ATA_OP_NULL,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500594 .qc_prep = mv_qc_prep_iie,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500595};
596
/*
 * Per-board capability table, selected by the chip_* value stored in
 * mv_pci_tbl[].driver_data below.  All chips are UDMA6/PIO0-4; they
 * differ in flags (dual host controller, NCQ, PMP, SoC) and in which
 * ata_port_operations they use.
 */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
657
/*
 * PCI IDs claimed by this driver.  Each entry's driver_data is a chip_*
 * index selecting the matching mv_port_info[] entry above.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
687
/* Chip-family hardware hooks for Gen-I (50xx) parts. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
696
/* Chip-family hardware hooks for PCI Gen-II/IIe (60xx/70xx) parts. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
705
/*
 * Chip-family hardware hooks for system-on-chip integrations.
 * The PHY errata handling is shared with the Gen-II PCI parts;
 * everything else has SoC-specific implementations.
 */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
714
Brett Russ20f733e2005-09-01 18:26:17 -0400715/*
716 * Functions
717 */
718
/*
 * writelfl - register write followed by a dummy read-back ("write and flush").
 *
 * The read forces the preceding write out of any PCI posted-write buffers,
 * guaranteeing the device has seen it before the caller proceeds.  The
 * write/read order is essential; do not reorder these statements.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
724
Brett Russ20f733e2005-09-01 18:26:17 -0400725static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
726{
727 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
728}
729
/* Index of the host controller that serves host-numbered @port. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
734
/* Port number relative to its host controller (the "hard" port index). */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
739
740static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
741 unsigned int port)
742{
743 return mv_hc_base(base, mv_hc_from_port(port));
744}
745
Brett Russ20f733e2005-09-01 18:26:17 -0400746static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
747{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500748 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500749 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500750 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400751}
752
Mark Lorde12bef52008-03-31 19:33:56 -0400753static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
754{
755 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
756 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
757
758 return hc_mmio + ofs;
759}
760
/* Controller MMIO base, as stashed in host private data during probe. */
static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}
766
/* Per-port register block for libata port @ap. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}
771
Jeff Garzikcca39742006-08-24 03:19:22 -0400772static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400773{
Jeff Garzikcca39742006-08-24 03:19:22 -0400774 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400775}
776
/**
 * mv_set_edma_ptrs - Program EDMA request/response queue registers.
 * @port_mmio: per-port register base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding queue DMA addresses and soft indices
 *
 * Loads the hardware queue base and in/out pointer registers from the
 * driver's cached req_idx/resp_idx so hardware and software agree on
 * the current queue positions.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* request queue base must be 1KB aligned: low bits carry the pointer */
	WARN_ON(pp->crqb_dma & 0x3ff);
	/* ">> 16 >> 16" extracts the high half safely even if dma_addr_t is 32-bit */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* errata XX42A0: the OUT pointer register also takes the base bits */
	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response queue base must be 256-byte aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
816
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: port to start eDMA on
 * @port_mmio: per-port register base
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * If eDMA is already running but in the wrong mode (NCQ vs. non-NCQ),
 * it is stopped first.  If eDMA is not running, stale event/interrupt
 * causes are cleared, the engine is reconfigured for the requested
 * mode, and eDMA is enabled.  Verify the local cache of the eDMA
 * state is accurate with a WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* mode change (NCQ <-> non-NCQ) requires an engine restart */
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
869
Brett Russ05b308e2005-10-05 17:08:53 -0400870/**
Mark Lorde12bef52008-03-31 19:33:56 -0400871 * mv_stop_edma_engine - Disable eDMA engine
Mark Lordb5624682008-03-31 19:34:40 -0400872 * @port_mmio: io base address
Brett Russ05b308e2005-10-05 17:08:53 -0400873 *
874 * LOCKING:
875 * Inherited from caller.
876 */
Mark Lordb5624682008-03-31 19:34:40 -0400877static int mv_stop_edma_engine(void __iomem *port_mmio)
Brett Russ31961942005-09-30 01:36:00 -0400878{
Mark Lordb5624682008-03-31 19:34:40 -0400879 int i;
Brett Russ31961942005-09-30 01:36:00 -0400880
Mark Lordb5624682008-03-31 19:34:40 -0400881 /* Disable eDMA. The disable bit auto clears. */
882 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
Jeff Garzik8b260242005-11-12 12:32:50 -0500883
Mark Lordb5624682008-03-31 19:34:40 -0400884 /* Wait for the chip to confirm eDMA is off. */
885 for (i = 10000; i > 0; i--) {
886 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
Jeff Garzik4537deb2007-07-12 14:30:19 -0400887 if (!(reg & EDMA_EN))
Mark Lordb5624682008-03-31 19:34:40 -0400888 return 0;
889 udelay(10);
Brett Russ31961942005-09-30 01:36:00 -0400890 }
Mark Lordb5624682008-03-31 19:34:40 -0400891 return -EIO;
Brett Russ31961942005-09-30 01:36:00 -0400892}
893
Mark Lorde12bef52008-03-31 19:33:56 -0400894static int mv_stop_edma(struct ata_port *ap)
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400895{
Mark Lordb5624682008-03-31 19:34:40 -0400896 void __iomem *port_mmio = mv_ap_base(ap);
897 struct mv_port_priv *pp = ap->private_data;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400898
Mark Lordb5624682008-03-31 19:34:40 -0400899 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
900 return 0;
901 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
902 if (mv_stop_edma_engine(port_mmio)) {
903 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
904 return -EIO;
905 }
906 return 0;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400907}
908
#ifdef ATA_DEBUG
/*
 * mv_dump_mem - hex-dump @bytes of MMIO space at @start, four words per line.
 * @start: ioremapped base of the region to dump
 * @bytes: number of bytes to dump (read in u32 units)
 *
 * Debug-only helper (compiled under ATA_DEBUG).  Reads via readl(),
 * so it must only be pointed at register space.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	/* byte offset is unsigned to match @bytes (avoids signed/unsigned compare) */
	unsigned b;
	int w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
923
/*
 * mv_dump_pci_cfg - hex-dump the first @bytes of @pdev's PCI config space.
 * @pdev: PCI device to dump
 * @bytes: number of bytes to read (in u32 units)
 *
 * Compiles to an empty body unless ATA_DEBUG is defined.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	/* byte offset is unsigned to match @bytes (avoids signed/unsigned compare) */
	unsigned b;
	int w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * mv_dump_all_regs - debug dump of PCI config, global, HC and port registers.
 * @mmio_base: controller MMIO base
 * @port: host port number to dump, or negative to dump all ports/HCs
 * @pdev: PCI device for config-space dump, or NULL to skip it
 *
 * Compiles to an empty body unless ATA_DEBUG is defined.  The fixed
 * offsets/lengths passed to mv_dump_mem() select controller register
 * windows; presumably they match the chip's register map — confirm
 * against the Marvell datasheet before relying on them.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* negative port: dump everything */
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
984
Brett Russ20f733e2005-09-01 18:26:17 -0400985static unsigned int mv_scr_offset(unsigned int sc_reg_in)
986{
987 unsigned int ofs;
988
989 switch (sc_reg_in) {
990 case SCR_STATUS:
991 case SCR_CONTROL:
992 case SCR_ERROR:
993 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
994 break;
995 case SCR_ACTIVE:
996 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
997 break;
998 default:
999 ofs = 0xffffffffU;
1000 break;
1001 }
1002 return ofs;
1003}
1004
Tejun Heoda3dbb12007-07-16 14:29:40 +09001005static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001006{
1007 unsigned int ofs = mv_scr_offset(sc_reg_in);
1008
Tejun Heoda3dbb12007-07-16 14:29:40 +09001009 if (ofs != 0xffffffffU) {
1010 *val = readl(mv_ap_base(ap) + ofs);
1011 return 0;
1012 } else
1013 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001014}
1015
Tejun Heoda3dbb12007-07-16 14:29:40 +09001016static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001017{
1018 unsigned int ofs = mv_scr_offset(sc_reg_in);
1019
Tejun Heoda3dbb12007-07-16 14:29:40 +09001020 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001021 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001022 return 0;
1023 } else
1024 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001025}
1026
Mark Lordf2738272008-01-26 18:32:29 -05001027static void mv6_dev_config(struct ata_device *adev)
1028{
1029 /*
Mark Lorde49856d2008-04-16 14:59:07 -04001030 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1031 *
1032 * Gen-II does not support NCQ over a port multiplier
1033 * (no FIS-based switching).
1034 *
Mark Lordf2738272008-01-26 18:32:29 -05001035 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1036 * See mv_qc_prep() for more info.
1037 */
Mark Lorde49856d2008-04-16 14:59:07 -04001038 if (adev->flags & ATA_DFLAG_NCQ) {
1039 if (sata_pmp_attached(adev->link->ap))
1040 adev->flags &= ~ATA_DFLAG_NCQ;
1041 else if (adev->max_sectors > ATA_MAX_SECTORS)
Mark Lordf2738272008-01-26 18:32:29 -05001042 adev->max_sectors = ATA_MAX_SECTORS;
Mark Lorde49856d2008-04-16 14:59:07 -04001043 }
Mark Lordf2738272008-01-26 18:32:29 -05001044}
1045
Mark Lorde49856d2008-04-16 14:59:07 -04001046static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
1047{
1048 u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
1049 /*
1050 * Various bit settings required for operation
1051 * in FIS-based switching (fbs) mode on GenIIe:
1052 */
1053 old_fcfg = readl(port_mmio + FIS_CFG_OFS);
1054 old_ltmode = readl(port_mmio + LTMODE_OFS);
1055 if (enable_fbs) {
1056 new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
1057 new_ltmode = old_ltmode | LTMODE_BIT8;
1058 } else { /* disable fbs */
1059 new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
1060 new_ltmode = old_ltmode & ~LTMODE_BIT8;
1061 }
1062 if (new_fcfg != old_fcfg)
1063 writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
1064 if (new_ltmode != old_ltmode)
1065 writelfl(new_ltmode, port_mmio + LTMODE_OFS);
Mark Lord0c589122008-01-26 18:31:16 -05001066}
Jeff Garzike4e7b892006-01-31 12:18:41 -05001067
/**
 * mv_edma_cfg - Build and program the EDMA_CFG register for this port.
 * @ap: port to configure
 * @want_ncq: nonzero to configure the engine for NCQ operation
 *
 * Composes the chip-generation-specific configuration word, manages the
 * Gen-IIe FIS-based-switching setup, caches the NCQ state in
 * pp->pp_flags, and writes the result to EDMA_CFG.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */

		/* FIS-based switching is only used for NCQ behind a PMP */
		if (want_ncq && sata_pmp_attached(ap)) {
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
			mv_config_fbs(port_mmio, 1);
		} else {
			mv_config_fbs(port_mmio, 0);
		}
	}

	/* keep the driver's cached NCQ state in sync with the hardware */
	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1106
Mark Lordda2fa9b2008-01-26 18:32:45 -05001107static void mv_port_free_dma_mem(struct ata_port *ap)
1108{
1109 struct mv_host_priv *hpriv = ap->host->private_data;
1110 struct mv_port_priv *pp = ap->private_data;
Mark Lordeb73d552008-01-29 13:24:00 -05001111 int tag;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001112
1113 if (pp->crqb) {
1114 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1115 pp->crqb = NULL;
1116 }
1117 if (pp->crpb) {
1118 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1119 pp->crpb = NULL;
1120 }
Mark Lordeb73d552008-01-29 13:24:00 -05001121 /*
1122 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1123 * For later hardware, we have one unique sg_tbl per NCQ tag.
1124 */
1125 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1126 if (pp->sg_tbl[tag]) {
1127 if (tag == 0 || !IS_GEN_I(hpriv))
1128 dma_pool_free(hpriv->sg_tbl_pool,
1129 pp->sg_tbl[tag],
1130 pp->sg_tbl_dma[tag]);
1131 pp->sg_tbl[tag] = NULL;
1132 }
Mark Lordda2fa9b2008-01-26 18:32:45 -05001133 }
1134}
1135
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure; partial
 * allocations are released via mv_port_free_dma_mem() (the pp struct
 * itself is devm-managed and needs no explicit free).
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* Gen-I: all tags alias the tag-0 table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1189
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	/* quiesce the engine first: its queues are about to be freed */
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
1204
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  Each DMA-mapped
 * segment is split so that no ePRD entry crosses a 64KB boundary
 * (the entry's length field is only 16 bits wide).
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* one sg table per tag (Gen-I aliases all tags to table 0) */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp this entry at the next 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* flag the final entry so the hardware knows where the table ends */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1248
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001249static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001250{
Mark Lord559eeda2006-05-19 16:40:15 -04001251 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001252 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001253 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001254}
1255
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ commands go through the EDMA request queue */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point the CRQB at this command's private sg table */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining registers are common to all commands; command goes last */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1346
/**
 *      mv_qc_prep_iie - Host specific command preparation (Gen IIE).
 *      @qc: queued command to prepare
 *
 *      Builds the Gen-IIE variant of the CRQB (command request block)
 *      for an EDMA command: request flags, SG table address, and the
 *      packed ATA taskfile.  For protocols other than DMA/NCQ this is
 *      a no-op; such commands are handled at issue time instead (see
 *      mv_qc_issue()).
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* Only EDMA-capable protocols get a CRQB. */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	/* The tag doubles as the CRQB slot and SG-table index below,
	 * so it must fit within the queue depth.
	 */
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	/* Two 16-bit shifts instead of ">> 32": a single 32-bit shift
	 * would be undefined when the DMA address type is 32 bits wide.
	 */
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* Pack the taskfile registers into the four little-endian
	 * command words of the Gen-IIE CRQB layout.
	 */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	/* No data phase mapped: nothing to describe in the SG table. */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1415
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      For EDMA-capable protocols (DMA/NCQ), (re)starts the EDMA engine
 *      and advances the request-queue producer index to hand the
 *      previously prepared CRQB to the hardware.  Any other protocol is
 *      punted to the generic SFF issue path, with EDMA stopped first.
 *
 *      Returns 0 for EDMA issue, otherwise the ata_sff_qc_issue()
 *      result.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* advance the software producer index for the request queue */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1459
Brett Russ05b308e2005-10-05 17:08:53 -04001460/**
Brett Russ05b308e2005-10-05 17:08:53 -04001461 * mv_err_intr - Handle error interrupts on the port
1462 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001463 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001464 *
1465 * In most cases, just clear the interrupt and move on. However,
Mark Lorde12bef52008-03-31 19:33:56 -04001466 * some cases require an eDMA reset, which also performs a COMRESET.
1467 * The SERR case requires a clear of pending errors in the SATA
1468 * SERROR register. Finally, if the port disabled DMA,
1469 * update our cached copy to match.
Brett Russ05b308e2005-10-05 17:08:53 -04001470 *
1471 * LOCKING:
1472 * Inherited from caller.
1473 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001474static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001475{
Brett Russ31961942005-09-30 01:36:00 -04001476 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001477 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1478 struct mv_port_priv *pp = ap->private_data;
1479 struct mv_host_priv *hpriv = ap->host->private_data;
1480 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1481 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001482 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001483
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001484 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001485
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001486 if (!edma_enabled) {
1487 /* just a guess: do we need to do this? should we
1488 * expand this, and do it in all cases?
1489 */
Tejun Heo936fd732007-08-06 18:36:23 +09001490 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1491 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001492 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001493
1494 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1495
1496 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1497
1498 /*
1499 * all generations share these EDMA error cause bits
1500 */
1501
1502 if (edma_err_cause & EDMA_ERR_DEV)
1503 err_mask |= AC_ERR_DEV;
1504 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001505 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001506 EDMA_ERR_INTRL_PAR)) {
1507 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001508 action |= ATA_EH_RESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001509 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001510 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001511 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1512 ata_ehi_hotplugged(ehi);
1513 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001514 "dev disconnect" : "dev connect");
Tejun Heocf480622008-01-24 00:05:14 +09001515 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001516 }
1517
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001518 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001519 eh_freeze_mask = EDMA_EH_FREEZE_5;
1520
1521 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001522 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001523 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001524 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001525 }
1526 } else {
1527 eh_freeze_mask = EDMA_EH_FREEZE;
1528
1529 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001530 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001531 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001532 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001533 }
1534
1535 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001536 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1537 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001538 err_mask = AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001539 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001540 }
1541 }
Brett Russ20f733e2005-09-01 18:26:17 -04001542
1543 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001544 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001545
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001546 if (!err_mask) {
1547 err_mask = AC_ERR_OTHER;
Tejun Heocf480622008-01-24 00:05:14 +09001548 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001549 }
1550
1551 ehi->serror |= serr;
1552 ehi->action |= action;
1553
1554 if (qc)
1555 qc->err_mask |= err_mask;
1556 else
1557 ehi->err_mask |= err_mask;
1558
1559 if (edma_err_cause & eh_freeze_mask)
1560 ata_port_freeze(ap);
1561 else
1562 ata_port_abort(ap);
1563}
1564
1565static void mv_intr_pio(struct ata_port *ap)
1566{
1567 struct ata_queued_cmd *qc;
1568 u8 ata_status;
1569
1570 /* ignore spurious intr if drive still BUSY */
1571 ata_status = readb(ap->ioaddr.status_addr);
1572 if (unlikely(ata_status & ATA_BUSY))
1573 return;
1574
1575 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001576 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001577 if (unlikely(!qc)) /* no active tag */
1578 return;
1579 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1580 return;
1581
1582 /* and finally, complete the ATA command */
1583 qc->err_mask |= ac_err_mask(ata_status);
1584 ata_qc_complete(qc);
1585}
1586
/*
 * mv_intr_edma - drain the EDMA response queue for one port.
 *
 * Walks the CRPB response queue from the software consumer index up to
 * the hardware producer index, completing each finished command.  On a
 * per-response error (non-NCQ mode only) it hands off to mv_err_intr()
 * and returns immediately.  The hardware out-pointer register is
 * written back once, after the loop, if any entries were consumed.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* error path takes over; out-pointer not updated */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1652
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	/* HC 0 serves the first MV_PORTS_PER_HC ports, HC 1 the rest. */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* non-PCI (SoC) hosts may expose fewer ports than a full HC */
	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* acknowledge: write back the complement of the observed bits */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		/* each port owns two bits of 'relevant' */
		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			/* polled commands are not ours to fail here */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		/* dispatch on the port's current mode: EDMA completions
		 * vs. legacy per-device (PIO path) interrupts
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1735
/*
 * mv_pci_error - handle a PCI error interrupt.
 * @host: host specific structure
 * @mmio: base address of the host registers
 *
 * Reads and logs the PCI IRQ cause register, dumps all chip registers
 * for debugging, clears the cause register, then records AC_ERR_HOST_BUS
 * against the active command (or the port's EH info) and freezes every
 * port whose link is not offline, so libata EH can recover.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* describe the cause only once, on the first port */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1775
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* a PCI error preempts all normal HC servicing */
	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* hand each host controller its slice of the main cause bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1831
Jeff Garzikc9d39132005-11-13 17:47:51 -05001832static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1833{
1834 unsigned int ofs;
1835
1836 switch (sc_reg_in) {
1837 case SCR_STATUS:
1838 case SCR_ERROR:
1839 case SCR_CONTROL:
1840 ofs = sc_reg_in * sizeof(u32);
1841 break;
1842 default:
1843 ofs = 0xffffffffU;
1844 break;
1845 }
1846 return ofs;
1847}
1848
Tejun Heoda3dbb12007-07-16 14:29:40 +09001849static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001850{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001851 struct mv_host_priv *hpriv = ap->host->private_data;
1852 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001853 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001854 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1855
Tejun Heoda3dbb12007-07-16 14:29:40 +09001856 if (ofs != 0xffffffffU) {
1857 *val = readl(addr + ofs);
1858 return 0;
1859 } else
1860 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001861}
1862
Tejun Heoda3dbb12007-07-16 14:29:40 +09001863static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001864{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001865 struct mv_host_priv *hpriv = ap->host->private_data;
1866 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001867 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001868 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1869
Tejun Heoda3dbb12007-07-16 14:29:40 +09001870 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001871 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001872 return 0;
1873 } else
1874 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001875}
1876
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001877static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik522479f2005-11-12 22:14:02 -05001878{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001879 struct pci_dev *pdev = to_pci_dev(host->dev);
Jeff Garzik522479f2005-11-12 22:14:02 -05001880 int early_5080;
1881
Auke Kok44c10132007-06-08 15:46:36 -07001882 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001883
1884 if (!early_5080) {
1885 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1886 tmp |= (1 << 0);
1887 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1888 }
1889
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001890 mv_reset_pci_bus(host, mmio);
Jeff Garzik522479f2005-11-12 22:14:02 -05001891}
1892
1893static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1894{
1895 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1896}
1897
Jeff Garzik47c2b672005-11-12 21:13:17 -05001898static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001899 void __iomem *mmio)
1900{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001901 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1902 u32 tmp;
1903
1904 tmp = readl(phy_mmio + MV5_PHY_MODE);
1905
1906 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1907 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001908}
1909
/*
 * mv5_enable_leds - 50xx LED/GPIO setup.
 *
 * Clears the GPIO port control register, then modifies the
 * expansion-ROM BAR control register.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): ~(1 << 0) sets every bit EXCEPT bit 0, whereas
	 * mv5_reset_bus() sets only bit 0 in this same register.  Verify
	 * against the 50xx datasheet whether (1 << 0) was intended.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1922
/*
 * mv5_phy_errata - apply 50xx PHY workarounds and restore signal levels.
 *
 * When the 50XXB0 erratum flag is set, first adjusts the link-training
 * mode and PHY control registers.  In all cases it then rewrites the
 * PHY mode register's pre-emphasis (bits 12:11) and amplitude
 * (bits 7:5) fields from the values cached by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* bits 12:11 (pre-emphasis) and 7:5 (amplitude), plus bit 6 */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* restore the cached signal settings within the masked fields */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1948
Jeff Garzikc9d39132005-11-13 17:47:51 -05001949
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * mv5_reset_hc_port - reset one 50xx port's channel and zero its EDMA
 * register block, then restore the EDMA config and IORDY timeout.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1979
#define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * mv5_reset_one_hc - reinitialize one 50xx host controller's register
 * block: zero the registers at offsets 0x00c-0x018, then rewrite the
 * register at 0x20 keeping only the 0x1c1c1c1c bits and setting
 * 0x03030303.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1998
1999static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2000 unsigned int n_hc)
2001{
2002 unsigned int hc, port;
2003
2004 for (hc = 0; hc < n_hc; hc++) {
2005 for (port = 0; port < MV_PORTS_PER_HC; port++)
2006 mv5_reset_hc_port(hpriv, mmio,
2007 (hc * MV_PORTS_PER_HC) + port);
2008
2009 mv5_reset_one_hc(hpriv, mmio, hc);
2010 }
2011
2012 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002013}
2014
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/*
 * mv_reset_pci_bus - reinitialize the chip's PCI interface registers.
 *
 * Clears the upper-middle byte of the PCI mode register, zeroes the
 * PCI timer/MSI/mask/error registers, and restores the crossbar
 * timeout to its default.
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	/* per-chip-family cause/mask offsets, set up at probe time */
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
2039
/*
 * mv6_reset_flash - 60xx variant of the flash-interface reset.
 * @hpriv: host private data
 * @mmio: base address of the HBA registers
 *
 * Performs the common 50xx flash reset, then additionally reprograms
 * the GPIO port control register: keeps only the low two bits and sets
 * bits 5 and 6.  NOTE(review): bit meanings are undocumented here —
 * presumably per the 60xx chip spec; confirm against the datasheet.
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2051
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @hpriv: host private data (unused here, kept for ops signature)
 *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers (unused by the global reset)
 *
 *      This routine only applies to 6xxx parts.
 *
 *      Follows the procedure from the PCI "main command and status
 *      register" table: flush the PCI master, assert the global soft
 *      reset, then deassert it and re-enable the PCI master.
 *
 *      Returns 0 on success, 1 if any step times out.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait (up to ~1ms) for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset (retry the write up to 5 times until it sticks) */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
	/*
	 * Temporary: wait 3 seconds before port-probing can happen,
	 * so that we don't miss finding sleepy SilXXXX port-multipliers.
	 * This can go away once hotplug is fully/correctly implemented.
	 */
	if (rc == 0)
		msleep(3000);
done:
	return rc;
}
2122
/*
 * mv6_read_preamp - save a port's PHY signal amplitude / pre-emphasis.
 * @hpriv: host private data; results stored in hpriv->signal[idx]
 * @idx: port index
 * @mmio: base address of the HBA registers
 *
 * If bit 0 of MV_RESET_CFG is clear, the PHY values are not valid and
 * fixed defaults are used instead; otherwise the current PHY_MODE2
 * fields are captured so mv6_phy_errata() can restore them later.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;	/* default amplitude */
		hpriv->signal[idx].pre = 0x1 << 5;	/* default pre-emphasis */
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
2142
Jeff Garzik47c2b672005-11-12 21:13:17 -05002143static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002144{
Jeff Garzik47c2b672005-11-12 21:13:17 -05002145 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002146}
2147
/*
 * mv6_phy_errata - apply 60xx PHY errata workarounds to one port.
 * @hpriv: host private data (errata flags, saved preamp/amplitude)
 * @mmio: base address of the HBA registers
 * @port: port number
 *
 * Sequence (order matters; delays come from the vendor workarounds):
 *  1. On 60X1B2/60X1C0, pulse PHY_MODE2 bits 31/16 with 200us settle
 *     delays.
 *  2. Rewrite a magic field in PHY_MODE3.
 *  3. On affected chips, fix PHY_MODE4 per errata FEr SATA#10; on
 *     60X1B2 the PHY_MODE4 write can disturb PHY_MODE3, so PHY_MODE3
 *     is saved before and restored after.
 *  4. Restore the saved amplitude/pre-emphasis values into PHY_MODE2
 *     (with fixed overrides for Gen-IIE parts per mvSata 3.6.1).
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then clear both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* on 60X1B2, writing PHY_MODE4 may clobber PHY_MODE3:
		 * save it here, restore it below
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2214
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002215/* TODO: use the generic LED interface to configure the SATA Presence */
2216/* & Acitivy LEDs on the board */
2217static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2218 void __iomem *mmio)
2219{
2220 return;
2221}
2222
2223static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2224 void __iomem *mmio)
2225{
2226 void __iomem *port_mmio;
2227 u32 tmp;
2228
2229 port_mmio = mv_port_base(mmio, idx);
2230 tmp = readl(port_mmio + PHY_MODE2);
2231
2232 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2233 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2234}
2235
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * mv_soc_reset_hc_port - reset and re-initialize one SoC SATA port.
 * @hpriv: host private data
 * @mmio: base address of the HBA registers
 * @port: port number
 *
 * Resets the channel, then zeroes the EDMA command and queue-pointer
 * registers and programs the EDMA config (0x101f — SoC value, differs
 * from the 0x11f used on PCI 5xxx parts) and IORDY timeout.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO
2266
#define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * mv_soc_reset_one_hc - quiesce the SoC's (single) host controller.
 * NOTE(review): only HC 0 is touched, and only offsets 0x00c-0x014 are
 * zeroed (the 50xx variant also zeroes 0x018 and rewrites 0x20) —
 * presumably intentional for the SoC; confirm against the datasheet.
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}

#undef ZERO
2280
2281static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2282 void __iomem *mmio, unsigned int n_hc)
2283{
2284 unsigned int port;
2285
2286 for (port = 0; port < hpriv->n_ports; port++)
2287 mv_soc_reset_hc_port(hpriv, mmio, port);
2288
2289 mv_soc_reset_one_hc(hpriv, mmio);
2290
2291 return 0;
2292}
2293
2294static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2295 void __iomem *mmio)
2296{
2297 return;
2298}
2299
2300static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2301{
2302 return;
2303}
2304
/*
 * mv_setup_ifctl - program a port's SATA interface-control register.
 * @port_mmio: base address of the port registers
 * @want_gen2i: non-zero to enable Gen2i (3.0 Gb/s) signalling
 *
 * Rewrites SATA_INTERFACE_CFG with the chip-spec base value, optionally
 * setting bit 7 for Gen2i speed; with @want_gen2i == 0 the link is
 * limited to 1.5 Gb/s (used by mv_hardreset's fallback).
 */
static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);

	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifctl |= (1 << 7);	/* enable gen2i speed */
	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
}
2314
/*
 * Caller must ensure that EDMA is not active,
 * by first doing mv_stop_edma() where needed.
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/* belt-and-suspenders: stop the EDMA engine before asserting RST */
	mv_stop_edma_engine(port_mmio);
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed */
		mv_setup_ifctl(port_mmio, 1);
	}
	/*
	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 * (This second ATA_RST write is the actual strobe; the first one
	 * above only ensures RST is asserted while ifctl is reprogrammed.)
	 */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* Gen-I parts need extra settle time after the reset */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2345
Mark Lorde49856d2008-04-16 14:59:07 -04002346static void mv_pmp_select(struct ata_port *ap, int pmp)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002347{
Mark Lorde49856d2008-04-16 14:59:07 -04002348 if (sata_pmp_supported(ap)) {
2349 void __iomem *port_mmio = mv_ap_base(ap);
2350 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2351 int old = reg & 0xf;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002352
Mark Lorde49856d2008-04-16 14:59:07 -04002353 if (old != pmp) {
2354 reg = (reg & ~0xf) | pmp;
2355 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
2356 }
Tejun Heoda3dbb12007-07-16 14:29:40 +09002357 }
Brett Russ20f733e2005-09-01 18:26:17 -04002358}
2359
Mark Lorde49856d2008-04-16 14:59:07 -04002360static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
2361 unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002362{
Mark Lorde49856d2008-04-16 14:59:07 -04002363 mv_pmp_select(link->ap, sata_srst_pmp(link));
2364 return sata_std_hardreset(link, class, deadline);
2365}
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002366
Mark Lorde49856d2008-04-16 14:59:07 -04002367static int mv_softreset(struct ata_link *link, unsigned int *class,
2368 unsigned long deadline)
2369{
2370 mv_pmp_select(link->ap, sata_srst_pmp(link));
2371 return ata_sff_softreset(link, class, deadline);
Jeff Garzik22374672005-11-17 10:59:48 -05002372}
2373
/*
 * mv_hardreset - libata hardreset callback with Marvell errata handling.
 * @link: link to reset
 * @class: out: device class detected by the reset
 * @deadline: jiffies deadline for the operation
 *
 * Resets the channel (which also marks EDMA disabled), then hard-resets
 * the link.  Workaround for errata FEr SATA#10 (part 2): if SStatus
 * keeps coming back as 0x121 after several attempts on Gen-II+ parts,
 * drop the interface to 1.5 Gb/s and retry, extending the deadline by
 * at most one extra second.  Loops until SStatus reads one of the
 * accepted values (0x0 no device, 0x113/0x123 link up).
 *
 * Returns 0 on success or the error from sata_link_hardreset().
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
			sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifctl(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}
2408
/*
 * mv_eh_freeze - libata ->freeze callback: mask this port's interrupts.
 * @ap: port to freeze
 *
 * Each port owns two adjacent bits (err/done) in the main IRQ mask;
 * ports on the second host controller (port_no > 3) are shifted up by
 * one additional bit.  Clears those two bits.
 */
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}
2428
/*
 * mv_eh_thaw - libata ->thaw callback: clear stale events and unmask.
 * @ap: port to thaw
 *
 * Mirror of mv_eh_freeze(): clears any pending EDMA error and HC
 * interrupt-cause bits for this port first (so stale events don't fire
 * the moment the mask opens), then re-enables the port's two err/done
 * bits in the main IRQ mask.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;	/* port index within the second HC */
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2462
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the taskfile shadow registers live in a
	 * block of u32 slots indexed by the standard ATA_REG_* numbers.
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2511
/*
 * mv_chip_id - select hw_ops vector and errata flags for the chip.
 * @host: ATA host being set up
 * @board_idx: board/chip table index (chip_504x ... chip_soc)
 *
 * Chooses mv5xxx/mv6xxx/mv_soc operations, the chip-generation flag
 * (GEN_I/GEN_II/GEN_IIE) and per-PCI-revision errata flags, and picks
 * the PCI vs PCIe interrupt register offsets.
 *
 * Returns 0 on success, 1 on an unrecognized board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: 7042 is otherwise handled like 6042 */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2651
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	/* PCI and SoC hosts keep their main IRQ registers at different
	 * offsets; cache the resolved addresses in hpriv.
	 */
	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask: silence everything until setup is done */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* capture per-port PHY settings before the reset clobbers them */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
2755
/*
 * mv_create_dma_pools - create per-host DMA pools for EDMA structures.
 * @hpriv: host private data; pool pointers are stored here
 * @dev: device the pools are bound to
 *
 * Creates pools for the command request queue (CRQB), command response
 * queue (CRPB), and scatter/gather tables.  The dmam_* variants are
 * device-managed, so pools already created are released automatically
 * on detach — the early -ENOMEM returns do not leak.
 */
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					      MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					      MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
2775
/*
 * mv_conf_mbus_windows - program the controller's MBUS decode windows.
 * @hpriv: host private data (provides the register base)
 * @dram: SoC DRAM chip-select layout to mirror into the windows
 *
 * Disables all four windows, then enables one window per DRAM chip
 * select (size/attribute/target in CTRL with the enable bit 0 set,
 * base address in BASE).
 * NOTE(review): assumes dram->num_cs <= 4; nothing here guards against
 * more chip selects than windows — confirm with the platform code.
 */
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}
2796
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002797/**
2798 * mv_platform_probe - handle a positive probe of an soc Marvell
2799 * host
2800 * @pdev: platform device found
2801 *
2802 * LOCKING:
2803 * Inherited from caller.
2804 */
2805static int mv_platform_probe(struct platform_device *pdev)
2806{
2807 static int printed_version;
2808 const struct mv_sata_platform_data *mv_platform_data;
2809 const struct ata_port_info *ppi[] =
2810 { &mv_port_info[chip_soc], NULL };
2811 struct ata_host *host;
2812 struct mv_host_priv *hpriv;
2813 struct resource *res;
2814 int n_ports, rc;
2815
2816 if (!printed_version++)
2817 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2818
2819 /*
2820 * Simple resource validation ..
2821 */
2822 if (unlikely(pdev->num_resources != 2)) {
2823 dev_err(&pdev->dev, "invalid number of resources\n");
2824 return -EINVAL;
2825 }
2826
2827 /*
2828 * Get the register base first
2829 */
2830 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2831 if (res == NULL)
2832 return -EINVAL;
2833
2834 /* allocate host */
2835 mv_platform_data = pdev->dev.platform_data;
2836 n_ports = mv_platform_data->n_ports;
2837
2838 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2839 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2840
2841 if (!host || !hpriv)
2842 return -ENOMEM;
2843 host->private_data = hpriv;
2844 hpriv->n_ports = n_ports;
2845
2846 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002847 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2848 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002849 hpriv->base -= MV_SATAHC0_REG_BASE;
2850
Lennert Buytenhek15a32632008-03-27 14:51:39 -04002851 /*
2852 * (Re-)program MBUS remapping windows if we are asked to.
2853 */
2854 if (mv_platform_data->dram != NULL)
2855 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
2856
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002857 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2858 if (rc)
2859 return rc;
2860
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002861 /* initialize adapter */
2862 rc = mv_init_host(host, chip_soc);
2863 if (rc)
2864 return rc;
2865
2866 dev_printk(KERN_INFO, &pdev->dev,
2867 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2868 host->n_ports);
2869
2870 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2871 IRQF_SHARED, &mv6_sht);
2872}
2873
2874/*
2875 *
2876 * mv_platform_remove - unplug a platform interface
2877 * @pdev: platform device
2878 *
2879 * A platform bus SATA device has been unplugged. Perform the needed
2880 * cleanup. Also called on module unload for any active devices.
2881 */
2882static int __devexit mv_platform_remove(struct platform_device *pdev)
2883{
2884 struct device *dev = &pdev->dev;
2885 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002886
2887 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002888 return 0;
2889}
2890
/* Platform (SoC) bus glue: binds to on-chip SATA devices named DRV_NAME. */
static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};
2899
2900
#ifdef CONFIG_PCI
/* Forward declaration; the probe routine is defined further below. */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


/* PCI bus glue for the chip variants that sit on a PCI/PCI-X bus. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2917
2918
2919/* move to PCI layer or libata core? */
2920static int pci_go_64(struct pci_dev *pdev)
2921{
2922 int rc;
2923
2924 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2925 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2926 if (rc) {
2927 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2928 if (rc) {
2929 dev_printk(KERN_ERR, &pdev->dev,
2930 "64-bit DMA enable failed\n");
2931 return rc;
2932 }
2933 }
2934 } else {
2935 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2936 if (rc) {
2937 dev_printk(KERN_ERR, &pdev->dev,
2938 "32-bit DMA enable failed\n");
2939 return rc;
2940 }
2941 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2942 if (rc) {
2943 dev_printk(KERN_ERR, &pdev->dev,
2944 "32-bit consistent DMA enable failed\n");
2945 return rc;
2946 }
2947 }
2948
2949 return rc;
2950}
2951
Brett Russ05b308e2005-10-05 17:08:53 -04002952/**
2953 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002954 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002955 *
2956 * FIXME: complete this.
2957 *
2958 * LOCKING:
2959 * Inherited from caller.
2960 */
Tejun Heo4447d352007-04-17 23:44:08 +09002961static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002962{
Tejun Heo4447d352007-04-17 23:44:08 +09002963 struct pci_dev *pdev = to_pci_dev(host->dev);
2964 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002965 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002966 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002967
2968 /* Use this to determine the HW stepping of the chip so we know
2969 * what errata to workaround
2970 */
Brett Russ31961942005-09-30 01:36:00 -04002971 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2972 if (scc == 0)
2973 scc_s = "SCSI";
2974 else if (scc == 0x01)
2975 scc_s = "RAID";
2976 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002977 scc_s = "?";
2978
2979 if (IS_GEN_I(hpriv))
2980 gen = "I";
2981 else if (IS_GEN_II(hpriv))
2982 gen = "II";
2983 else if (IS_GEN_IIE(hpriv))
2984 gen = "IIE";
2985 else
2986 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002987
Jeff Garzika9524a72005-10-30 14:39:11 -05002988 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002989 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2990 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002991 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2992}
2993
/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Allocates the ATA host, acquires managed PCI resources, sets the DMA
 * masks, creates the DMA pools, initializes the adapter and activates
 * the host with either MSI or legacy INTx interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	/* Announce the driver version once, on the first probe only. */
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources (managed: released automatically on failure) */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);	/* keep the device enabled for its current owner */
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: if MSI is requested but unavailable, use INTx */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);	/* best effort; failure is harmless */
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003063#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003064
/*
 * NOTE(review): these prototypes are redundant -- both functions are
 * already defined earlier in this file; candidates for removal.
 */
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
3067
/*
 * mv_init - module load entry point
 *
 * Registers the PCI driver (when CONFIG_PCI is set) and then the
 * platform driver.  If platform registration fails, the PCI driver is
 * unregistered again so module load fails cleanly with no half-state.
 */
static int __init mv_init(void)
{
	int rc = -ENODEV;	/* returned unchanged if no bus registers */
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	/* roll back the PCI registration on platform-driver failure */
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}
3084
/* mv_exit - module unload: unregister drivers in reverse of mv_init order */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3092
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);	/* enables platform-bus autoloading */

#ifdef CONFIG_PCI
/* module option: request PCI MSI instead of legacy INTx interrupts */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);