blob: cee78f9e9d1bd2000143afffc494004271d66547 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Mark Lorde12bef52008-03-31 19:33:56 -04004 * Copyright 2008: Marvell Corporation, all rights reserved.
Jeff Garzik8b260242005-11-12 12:32:50 -05005 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05006 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04007 *
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
Jeff Garzik4a05e202007-05-24 23:40:15 -040025/*
26 sata_mv TODO list:
27
28 1) Needs a full errata audit for all chipsets. I implemented most
29 of the errata workarounds found in the Marvell vendor driver, but
30 I distinctly remember a couple workarounds (one related to PCI-X)
31 are still needed.
32
Mark Lord1fd2e1c2008-01-26 18:33:59 -050033 2) Improve/fix IRQ and error handling sequences.
34
35 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36
37 4) Think about TCQ support here, and for libata in general
38 with controllers that suppport it via host-queuing hardware
39 (a software-only implementation could be a nightmare).
Jeff Garzik4a05e202007-05-24 23:40:15 -040040
41 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42
Mark Lorde49856d2008-04-16 14:59:07 -040043 6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.
Jeff Garzik4a05e202007-05-24 23:40:15 -040044
Mark Lord40f0bc22008-04-16 14:57:25 -040045 7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
Jeff Garzik4a05e202007-05-24 23:40:15 -040046
Jeff Garzik4a05e202007-05-24 23:40:15 -040047 8) Develop a low-power-consumption strategy, and implement it.
48
49 9) [Experiment, low priority] See if ATAPI can be supported using
50 "unknown FIS" or "vendor-specific FIS" support, or something creative
51 like that.
52
53 10) [Experiment, low priority] Investigate interrupt coalescing.
54 Quite often, especially with PCI Message Signalled Interrupts (MSI),
55 the overhead reduced by interrupt mitigation is quite often not
56 worth the latency cost.
57
58 11) [Experiment, Marvell value added] Is it possible to use target
59 mode to cross-connect two Linux boxes with Marvell cards? If so,
60 creating LibATA target mode support would be very interesting.
61
62 Target mode, for those without docs, is the ability to directly
63 connect two SATA controllers.
64
Jeff Garzik4a05e202007-05-24 23:40:15 -040065*/
66
Brett Russ20f733e2005-09-01 18:26:17 -040067#include <linux/kernel.h>
68#include <linux/module.h>
69#include <linux/pci.h>
70#include <linux/init.h>
71#include <linux/blkdev.h>
72#include <linux/delay.h>
73#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080074#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040075#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050076#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050077#include <linux/platform_device.h>
78#include <linux/ata_platform.h>
Lennert Buytenhek15a32632008-03-27 14:51:39 -040079#include <linux/mbus.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050081#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040082#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040083#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040084
85#define DRV_NAME "sata_mv"
Mark Lord1fd2e1c2008-01-26 18:33:59 -050086#define DRV_VERSION "1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040087
88enum {
89 /* BAR's are enumerated in terms of pci_resource_start() terms */
90 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
91 MV_IO_BAR = 2, /* offset 0x18: IO space */
92 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
93
94 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
95 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
96
97 MV_PCI_REG_BASE = 0,
98 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040099 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
100 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
101 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
102 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
103 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
104
Brett Russ20f733e2005-09-01 18:26:17 -0400105 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500106 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500107 MV_GPIO_PORT_CTL = 0x104f0,
108 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400109
110 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
111 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
112 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
113 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
114
Brett Russ31961942005-09-30 01:36:00 -0400115 MV_MAX_Q_DEPTH = 32,
116 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
117
118 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
119 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400120 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
121 */
122 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
123 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500124 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400125 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400126
Mark Lord352fab72008-04-19 14:43:42 -0400127 /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
Brett Russ20f733e2005-09-01 18:26:17 -0400128 MV_PORT_HC_SHIFT = 2,
Mark Lord352fab72008-04-19 14:43:42 -0400129 MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
130 /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
131 MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
Brett Russ20f733e2005-09-01 18:26:17 -0400132
133 /* Host Flags */
134 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
135 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100136 /* SoC integrated controllers, no PCI interface */
Mark Lorde12bef52008-03-31 19:33:56 -0400137 MV_FLAG_SOC = (1 << 28),
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100138
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400139 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400140 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
141 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500142 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400143
Brett Russ31961942005-09-30 01:36:00 -0400144 CRQB_FLAG_READ = (1 << 0),
145 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400146 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
Mark Lorde12bef52008-03-31 19:33:56 -0400147 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400148 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400149 CRQB_CMD_ADDR_SHIFT = 8,
150 CRQB_CMD_CS = (0x2 << 11),
151 CRQB_CMD_LAST = (1 << 15),
152
153 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400154 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
155 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400156
157 EPRD_FLAG_END_OF_TBL = (1 << 31),
158
Brett Russ20f733e2005-09-01 18:26:17 -0400159 /* PCI interface registers */
160
Brett Russ31961942005-09-30 01:36:00 -0400161 PCI_COMMAND_OFS = 0xc00,
162
Brett Russ20f733e2005-09-01 18:26:17 -0400163 PCI_MAIN_CMD_STS_OFS = 0xd30,
164 STOP_PCI_MASTER = (1 << 2),
165 PCI_MASTER_EMPTY = (1 << 3),
166 GLOB_SFT_RST = (1 << 4),
167
Jeff Garzik522479f2005-11-12 22:14:02 -0500168 MV_PCI_MODE = 0xd00,
169 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
170 MV_PCI_DISC_TIMER = 0xd04,
171 MV_PCI_MSI_TRIGGER = 0xc38,
172 MV_PCI_SERR_MASK = 0xc28,
173 MV_PCI_XBAR_TMOUT = 0x1d04,
174 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
175 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
176 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
177 MV_PCI_ERR_COMMAND = 0x1d50,
178
Mark Lord02a121d2007-12-01 13:07:22 -0500179 PCI_IRQ_CAUSE_OFS = 0x1d58,
180 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400181 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
182
Mark Lord02a121d2007-12-01 13:07:22 -0500183 PCIE_IRQ_CAUSE_OFS = 0x1900,
184 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500185 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500186
Brett Russ20f733e2005-09-01 18:26:17 -0400187 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
188 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500189 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
190 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
Mark Lord352fab72008-04-19 14:43:42 -0400191 ERR_IRQ = (1 << 0), /* shift by port # */
192 DONE_IRQ = (1 << 1), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400193 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
194 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
195 PCI_ERR = (1 << 18),
196 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
197 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500198 PORTS_0_3_COAL_DONE = (1 << 8),
199 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400200 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
201 GPIO_INT = (1 << 22),
202 SELF_INT = (1 << 23),
203 TWSI_INT = (1 << 24),
204 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500205 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Mark Lorde12bef52008-03-31 19:33:56 -0400206 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500207 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Mark Lordf9f7fe02008-04-19 14:44:42 -0400208 PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400209 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
210 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500211 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
212 HC_MAIN_RSVD_5),
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500213 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
Brett Russ20f733e2005-09-01 18:26:17 -0400214
215 /* SATAHC registers */
216 HC_CFG_OFS = 0,
217
218 HC_IRQ_CAUSE_OFS = 0x14,
Mark Lord352fab72008-04-19 14:43:42 -0400219 DMA_IRQ = (1 << 0), /* shift by port # */
220 HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
Brett Russ20f733e2005-09-01 18:26:17 -0400221 DEV_IRQ = (1 << 8), /* shift by port # */
222
223 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400224 SHD_BLK_OFS = 0x100,
225 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400226
227 /* SATA registers */
228 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
229 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500230 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Mark Lord17c5aab2008-04-16 14:56:51 -0400231
Mark Lorde12bef52008-03-31 19:33:56 -0400232 LTMODE_OFS = 0x30c,
Mark Lord17c5aab2008-04-16 14:56:51 -0400233 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
234
Jeff Garzik47c2b672005-11-12 21:13:17 -0500235 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500236 PHY_MODE4 = 0x314,
237 PHY_MODE2 = 0x330,
Mark Lorde12bef52008-03-31 19:33:56 -0400238 SATA_IFCTL_OFS = 0x344,
239 SATA_IFSTAT_OFS = 0x34c,
240 VENDOR_UNIQUE_FIS_OFS = 0x35c,
Mark Lord17c5aab2008-04-16 14:56:51 -0400241
Mark Lorde12bef52008-03-31 19:33:56 -0400242 FIS_CFG_OFS = 0x360,
Mark Lord17c5aab2008-04-16 14:56:51 -0400243 FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
244
Jeff Garzikc9d39132005-11-13 17:47:51 -0500245 MV5_PHY_MODE = 0x74,
246 MV5_LT_MODE = 0x30,
247 MV5_PHY_CTL = 0x0C,
Mark Lorde12bef52008-03-31 19:33:56 -0400248 SATA_INTERFACE_CFG = 0x050,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500249
250 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400251
252 /* Port registers */
253 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500254 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
255 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
256 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
257 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
258 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Mark Lorde12bef52008-03-31 19:33:56 -0400259 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
260 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
Brett Russ20f733e2005-09-01 18:26:17 -0400261
262 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
263 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400264 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
265 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
266 EDMA_ERR_DEV = (1 << 2), /* device error */
267 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
268 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
269 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400270 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
271 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400272 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400273 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400274 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
275 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
276 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
277 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500278
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400279 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500280 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
281 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
282 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
283 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
284
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400285 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500286
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400287 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500288 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
289 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
290 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
291 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
292 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
293
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400294 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500295
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400296 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400297 EDMA_ERR_OVERRUN_5 = (1 << 5),
298 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500299
300 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
301 EDMA_ERR_LNK_CTRL_RX_1 |
302 EDMA_ERR_LNK_CTRL_RX_3 |
Mark Lord40f0bc22008-04-16 14:57:25 -0400303 EDMA_ERR_LNK_CTRL_TX |
304 /* temporary, until we fix hotplug: */
305 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
Mark Lord646a4da2008-01-26 18:30:37 -0500306
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400307 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
308 EDMA_ERR_PRD_PAR |
309 EDMA_ERR_DEV_DCON |
310 EDMA_ERR_DEV_CON |
311 EDMA_ERR_SERR |
312 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400313 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400314 EDMA_ERR_CRPB_PAR |
315 EDMA_ERR_INTRL_PAR |
316 EDMA_ERR_IORDY |
317 EDMA_ERR_LNK_CTRL_RX_2 |
318 EDMA_ERR_LNK_DATA_RX |
319 EDMA_ERR_LNK_DATA_TX |
320 EDMA_ERR_TRANS_PROTO,
Mark Lorde12bef52008-03-31 19:33:56 -0400321
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400322 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
323 EDMA_ERR_PRD_PAR |
324 EDMA_ERR_DEV_DCON |
325 EDMA_ERR_DEV_CON |
326 EDMA_ERR_OVERRUN_5 |
327 EDMA_ERR_UNDERRUN_5 |
328 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400329 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400330 EDMA_ERR_CRPB_PAR |
331 EDMA_ERR_INTRL_PAR |
332 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400333
Brett Russ31961942005-09-30 01:36:00 -0400334 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
335 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400336
337 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
338 EDMA_REQ_Q_PTR_SHIFT = 5,
339
340 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
341 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
342 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400343 EDMA_RSP_Q_PTR_SHIFT = 3,
344
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400345 EDMA_CMD_OFS = 0x28, /* EDMA command register */
346 EDMA_EN = (1 << 0), /* enable EDMA */
347 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
348 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400349
Jeff Garzikc9d39132005-11-13 17:47:51 -0500350 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500351 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500352
Mark Lord352fab72008-04-19 14:43:42 -0400353 GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */
354
Brett Russ31961942005-09-30 01:36:00 -0400355 /* Host private flags (hp_flags) */
356 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500357 MV_HP_ERRATA_50XXB0 = (1 << 1),
358 MV_HP_ERRATA_50XXB2 = (1 << 2),
359 MV_HP_ERRATA_60X1B2 = (1 << 3),
360 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500361 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400362 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
363 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
364 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500365 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400366
Brett Russ31961942005-09-30 01:36:00 -0400367 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400368 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500369 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Brett Russ31961942005-09-30 01:36:00 -0400370};
371
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400372#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
373#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500374#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100375#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500376
Lennert Buytenhek15a32632008-03-27 14:51:39 -0400377#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
378#define WINDOW_BASE(i) (0x20034 + ((i) << 4))
379
Jeff Garzik095fec82005-11-12 09:50:49 -0500380enum {
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400381 /* DMA boundary 0xffff is required by the s/g splitting
382 * we need on /length/ in mv_fill-sg().
383 */
384 MV_DMA_BOUNDARY = 0xffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500385
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400386 /* mask of register bits containing lower 32 bits
387 * of EDMA request queue DMA address
388 */
Jeff Garzik095fec82005-11-12 09:50:49 -0500389 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
390
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400391 /* ditto, for response queue */
Jeff Garzik095fec82005-11-12 09:50:49 -0500392 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
393};
394
Jeff Garzik522479f2005-11-12 22:14:02 -0500395enum chip_type {
396 chip_504x,
397 chip_508x,
398 chip_5080,
399 chip_604x,
400 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500401 chip_6042,
402 chip_7042,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500403 chip_soc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500404};
405
Brett Russ31961942005-09-30 01:36:00 -0400406/* Command ReQuest Block: 32B */
407struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400408 __le32 sg_addr;
409 __le32 sg_addr_hi;
410 __le16 ctrl_flags;
411 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400412};
413
Jeff Garzike4e7b892006-01-31 12:18:41 -0500414struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400415 __le32 addr;
416 __le32 addr_hi;
417 __le32 flags;
418 __le32 len;
419 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500420};
421
Brett Russ31961942005-09-30 01:36:00 -0400422/* Command ResPonse Block: 8B */
423struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400424 __le16 id;
425 __le16 flags;
426 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400427};
428
429/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
430struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400431 __le32 addr;
432 __le32 flags_size;
433 __le32 addr_hi;
434 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400435};
436
437struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400438 struct mv_crqb *crqb;
439 dma_addr_t crqb_dma;
440 struct mv_crpb *crpb;
441 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500442 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
443 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400444
445 unsigned int req_idx;
446 unsigned int resp_idx;
447
Brett Russ31961942005-09-30 01:36:00 -0400448 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400449};
450
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500451struct mv_port_signal {
452 u32 amps;
453 u32 pre;
454};
455
Mark Lord02a121d2007-12-01 13:07:22 -0500456struct mv_host_priv {
457 u32 hp_flags;
458 struct mv_port_signal signal[8];
459 const struct mv_hw_ops *ops;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500460 int n_ports;
461 void __iomem *base;
462 void __iomem *main_cause_reg_addr;
463 void __iomem *main_mask_reg_addr;
Mark Lord02a121d2007-12-01 13:07:22 -0500464 u32 irq_cause_ofs;
465 u32 irq_mask_ofs;
466 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500467 /*
468 * These consistent DMA memory pools give us guaranteed
469 * alignment for hardware-accessed data structures,
470 * and less memory waste in accomplishing the alignment.
471 */
472 struct dma_pool *crqb_pool;
473 struct dma_pool *crpb_pool;
474 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500475};
476
Jeff Garzik47c2b672005-11-12 21:13:17 -0500477struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500478 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
479 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500480 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
481 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
482 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500483 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
484 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500485 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100486 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500487};
488
Tejun Heoda3dbb12007-07-16 14:29:40 +0900489static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
490static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
491static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
492static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400493static int mv_port_start(struct ata_port *ap);
494static void mv_port_stop(struct ata_port *ap);
495static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500496static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900497static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Tejun Heoa1efdab2008-03-25 12:22:50 +0900498static int mv_hardreset(struct ata_link *link, unsigned int *class,
499 unsigned long deadline);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400500static void mv_eh_freeze(struct ata_port *ap);
501static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500502static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400503
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500504static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
505 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500506static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
507static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
508 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500509static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
510 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500511static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100512static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500513
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500514static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
515 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500516static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
517static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
518 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500519static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
520 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500521static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500522static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
523 void __iomem *mmio);
524static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
525 void __iomem *mmio);
526static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
527 void __iomem *mmio, unsigned int n_hc);
528static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
529 void __iomem *mmio);
530static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100531static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
Mark Lorde12bef52008-03-31 19:33:56 -0400532static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500533 unsigned int port_no);
Mark Lorde12bef52008-03-31 19:33:56 -0400534static int mv_stop_edma(struct ata_port *ap);
Mark Lordb5624682008-03-31 19:34:40 -0400535static int mv_stop_edma_engine(void __iomem *port_mmio);
Mark Lorde12bef52008-03-31 19:33:56 -0400536static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500537
Mark Lorde49856d2008-04-16 14:59:07 -0400538static void mv_pmp_select(struct ata_port *ap, int pmp);
539static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
540 unsigned long deadline);
541static int mv_softreset(struct ata_link *link, unsigned int *class,
542 unsigned long deadline);
Brett Russ20f733e2005-09-01 18:26:17 -0400543
Mark Lordeb73d552008-01-29 13:24:00 -0500544/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
545 * because we have to allow room for worst case splitting of
546 * PRDs for 64K boundaries in mv_fill_sg().
547 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400548static struct scsi_host_template mv5_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900549 ATA_BASE_SHT(DRV_NAME),
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400550 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400551 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400552};
553
554static struct scsi_host_template mv6_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900555 ATA_NCQ_SHT(DRV_NAME),
Mark Lord138bfdd2008-01-26 18:33:18 -0500556 .can_queue = MV_MAX_Q_DEPTH - 1,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400557 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400558 .dma_boundary = MV_DMA_BOUNDARY,
Brett Russ20f733e2005-09-01 18:26:17 -0400559};
560
Tejun Heo029cfd62008-03-25 12:22:49 +0900561static struct ata_port_operations mv5_ops = {
562 .inherits = &ata_sff_port_ops,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500563
564 .qc_prep = mv_qc_prep,
565 .qc_issue = mv_qc_issue,
566
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400567 .freeze = mv_eh_freeze,
568 .thaw = mv_eh_thaw,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900569 .hardreset = mv_hardreset,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900570 .error_handler = ata_std_error_handler, /* avoid SFF EH */
Tejun Heo029cfd62008-03-25 12:22:49 +0900571 .post_internal_cmd = ATA_OP_NULL,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400572
Jeff Garzikc9d39132005-11-13 17:47:51 -0500573 .scr_read = mv5_scr_read,
574 .scr_write = mv5_scr_write,
575
576 .port_start = mv_port_start,
577 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500578};
579
Tejun Heo029cfd62008-03-25 12:22:49 +0900580static struct ata_port_operations mv6_ops = {
581 .inherits = &mv5_ops,
Mark Lorde49856d2008-04-16 14:59:07 -0400582 .qc_defer = sata_pmp_qc_defer_cmd_switch,
Mark Lordf2738272008-01-26 18:32:29 -0500583 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400584 .scr_read = mv_scr_read,
585 .scr_write = mv_scr_write,
586
Mark Lorde49856d2008-04-16 14:59:07 -0400587 .pmp_hardreset = mv_pmp_hardreset,
588 .pmp_softreset = mv_softreset,
589 .softreset = mv_softreset,
590 .error_handler = sata_pmp_error_handler,
Brett Russ20f733e2005-09-01 18:26:17 -0400591};
592
Tejun Heo029cfd62008-03-25 12:22:49 +0900593static struct ata_port_operations mv_iie_ops = {
594 .inherits = &mv6_ops,
Mark Lorde49856d2008-04-16 14:59:07 -0400595 .qc_defer = ata_std_qc_defer, /* FIS-based switching */
Tejun Heo029cfd62008-03-25 12:22:49 +0900596 .dev_config = ATA_OP_NULL,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500597 .qc_prep = mv_qc_prep_iie,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500598};
599
/*
 * Per-variant capability table, indexed by board id (chip_504x etc.)
 * from mv_pci_tbl / the platform glue.  All variants do PIO0-4 and
 * UDMA6; they differ in generation (port_ops), host-controller count,
 * and NCQ/PMP support.
 */
static const struct ata_port_info mv_port_info[] = {
	{ /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{ /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{ /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{ /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{ /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{ /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{ /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{ /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
660
/* PCI IDs handled by this driver, mapped to mv_port_info[] indices. */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
690
/* Low-level chip-init hooks for Gen-I (50xx) PCI chips. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
699
/* Low-level chip-init hooks for Gen-II/IIe (60xx/7042) PCI chips. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
708
/*
 * Low-level chip-init hooks for SoC variants; the PHY errata handling
 * is shared with Gen-II, everything else has SoC-specific versions.
 */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
717
Brett Russ20f733e2005-09-01 18:26:17 -0400718/*
719 * Functions
720 */
721
/*
 * writelfl - register write followed by a read-back.
 * The dummy read forces the write out of any PCI posted-write buffers
 * before execution continues.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
727
/* Index of the host controller that services the given port. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
732
/* Position of the port within its host controller (the "hardport"). */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
737
/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with the main_cause and main_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;	/* two irq bits per hardport */	\
}
755
/* MMIO base of host controller 'hc' within the chip register space. */
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
760
/*
 * MMIO base of the host controller that owns the given port.
 * Note: expects a full port number (0..7), not a hardport.
 */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
766
/*
 * MMIO base of a port's own register block: the port blocks follow
 * the HC arbiter registers within the host controller's region.
 */
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
773
/*
 * Gen-I (50xx) PHY register base: one 0x100-byte block per hardport,
 * starting 0x100 past the host controller's own registers.
 */
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
781
/* Chip MMIO base, stashed in host private data at init time. */
static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}
787
/* MMIO base of the port backing this ATA channel. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}
792
Jeff Garzikcca39742006-08-24 03:19:22 -0400793static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400794{
Jeff Garzikcca39742006-08-24 03:19:22 -0400795 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400796}
797
/**
 *      mv_set_edma_ptrs - program the EDMA request/response queue registers
 *      @port_mmio: port base address
 *      @hpriv: host private data (consulted for errata flags)
 *      @pp: port private data (queue DMA addresses and soft indices)
 *
 *      Load the hardware queue base registers and bring the hardware
 *      IN/OUT pointers in sync with the driver's cached req_idx/resp_idx.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);	/* queue must be 1KB-aligned */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* errata: some chips want the low base address in OUT_PTR too */
	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);	/* queue must be 256B-aligned */
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
839
Brett Russ05b308e2005-10-05 17:08:53 -0400840/**
841 * mv_start_dma - Enable eDMA engine
842 * @base: port base address
843 * @pp: port private data
844 *
Tejun Heobeec7db2006-02-11 19:11:13 +0900845 * Verify the local cache of the eDMA state is accurate with a
846 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -0400847 *
848 * LOCKING:
849 * Inherited from caller.
850 */
Mark Lord0c589122008-01-26 18:31:16 -0500851static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
Mark Lord72109162008-01-26 18:31:33 -0500852 struct mv_port_priv *pp, u8 protocol)
Brett Russ31961942005-09-30 01:36:00 -0400853{
Mark Lord72109162008-01-26 18:31:33 -0500854 int want_ncq = (protocol == ATA_PROT_NCQ);
855
856 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
857 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
858 if (want_ncq != using_ncq)
Mark Lordb5624682008-03-31 19:34:40 -0400859 mv_stop_edma(ap);
Mark Lord72109162008-01-26 18:31:33 -0500860 }
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400861 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
Mark Lord0c589122008-01-26 18:31:16 -0500862 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lord352fab72008-04-19 14:43:42 -0400863 int hardport = mv_hardport_from_port(ap->port_no);
Mark Lord0c589122008-01-26 18:31:16 -0500864 void __iomem *hc_mmio = mv_hc_base_from_port(
Mark Lord352fab72008-04-19 14:43:42 -0400865 mv_host_base(ap->host), hardport);
Mark Lord0c589122008-01-26 18:31:16 -0500866 u32 hc_irq_cause, ipending;
867
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400868 /* clear EDMA event indicators, if any */
Mark Lordf630d562008-01-26 18:31:00 -0500869 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400870
Mark Lord0c589122008-01-26 18:31:16 -0500871 /* clear EDMA interrupt indicator, if any */
872 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Mark Lord352fab72008-04-19 14:43:42 -0400873 ipending = (DEV_IRQ | DMA_IRQ) << hardport;
Mark Lord0c589122008-01-26 18:31:16 -0500874 if (hc_irq_cause & ipending) {
875 writelfl(hc_irq_cause & ~ipending,
876 hc_mmio + HC_IRQ_CAUSE_OFS);
877 }
878
Mark Lorde12bef52008-03-31 19:33:56 -0400879 mv_edma_cfg(ap, want_ncq);
Mark Lord0c589122008-01-26 18:31:16 -0500880
881 /* clear FIS IRQ Cause */
882 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
883
Mark Lordf630d562008-01-26 18:31:00 -0500884 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400885
Mark Lordf630d562008-01-26 18:31:00 -0500886 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
Brett Russafb0edd2005-10-05 17:08:42 -0400887 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
888 }
Brett Russ31961942005-09-30 01:36:00 -0400889}
890
/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      Returns 0 once the chip reports eDMA disabled, or -EIO if it
 *      fails to quiesce within ~100ms (10000 polls of 10us each).
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
914
Mark Lorde12bef52008-03-31 19:33:56 -0400915static int mv_stop_edma(struct ata_port *ap)
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400916{
Mark Lordb5624682008-03-31 19:34:40 -0400917 void __iomem *port_mmio = mv_ap_base(ap);
918 struct mv_port_priv *pp = ap->private_data;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400919
Mark Lordb5624682008-03-31 19:34:40 -0400920 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
921 return 0;
922 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
923 if (mv_stop_edma_engine(port_mmio)) {
924 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
925 return -EIO;
926 }
927 return 0;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400928}
929
#ifdef ATA_DEBUG
/* Debug helper: hex-dump 'bytes' of MMIO space, four words per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
944
/* Debug helper: hex-dump PCI config space; no-op unless ATA_DEBUG. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * Debug helper: dump PCI config space, chip-global registers, and the
 * HC + EDMA + SATA registers for one port (port >= 0) or for all
 * ports (port < 0).  No-op unless ATA_DEBUG is defined.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	/* chip-global register windows */
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
1005
Brett Russ20f733e2005-09-01 18:26:17 -04001006static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1007{
1008 unsigned int ofs;
1009
1010 switch (sc_reg_in) {
1011 case SCR_STATUS:
1012 case SCR_CONTROL:
1013 case SCR_ERROR:
1014 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1015 break;
1016 case SCR_ACTIVE:
1017 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1018 break;
1019 default:
1020 ofs = 0xffffffffU;
1021 break;
1022 }
1023 return ofs;
1024}
1025
Tejun Heoda3dbb12007-07-16 14:29:40 +09001026static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001027{
1028 unsigned int ofs = mv_scr_offset(sc_reg_in);
1029
Tejun Heoda3dbb12007-07-16 14:29:40 +09001030 if (ofs != 0xffffffffU) {
1031 *val = readl(mv_ap_base(ap) + ofs);
1032 return 0;
1033 } else
1034 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001035}
1036
Tejun Heoda3dbb12007-07-16 14:29:40 +09001037static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001038{
1039 unsigned int ofs = mv_scr_offset(sc_reg_in);
1040
Tejun Heoda3dbb12007-07-16 14:29:40 +09001041 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001042 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001043 return 0;
1044 } else
1045 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001046}
1047
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 *
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			/* behind a PMP: must fall back to non-NCQ */
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
			/* hob_nsect unavailable: cap the transfer size */
			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
			ata_dev_printk(adev, KERN_INFO,
				"max_sectors limited to %u for NCQ\n",
				adev->max_sectors);
		}
	}
}
1072
/*
 * Enable or disable the register bits needed for FIS-based switching
 * (FBS) on GenIIe, touching each register only when its value changes.
 */
static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
{
	u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
	/*
	 * Various bit settings required for operation
	 * in FIS-based switching (fbs) mode on GenIIe:
	 */
	old_fcfg   = readl(port_mmio + FIS_CFG_OFS);
	old_ltmode = readl(port_mmio + LTMODE_OFS);
	if (enable_fbs) {
		new_fcfg   = old_fcfg   |  FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode |  LTMODE_BIT8;
	} else { /* disable fbs */
		new_fcfg   = old_fcfg   & ~FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode & ~LTMODE_BIT8;
	}
	/* skip redundant register writes */
	if (new_fcfg != old_fcfg)
		writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
}
Jeff Garzike4e7b892006-01-31 12:18:41 -05001094
/*
 * Program the EDMA configuration register for the coming command
 * stream (NCQ or not), applying per-generation settings, and cache
 * the NCQ state in pp->pp_flags.
 */
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */

		/* FBS is only engaged for NCQ behind a port multiplier */
		if (want_ncq && sata_pmp_attached(ap)) {
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
			mv_config_fbs(port_mmio, 1);
		} else {
			mv_config_fbs(port_mmio, 0);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1133
/* Release all per-port DMA pool memory (CRQB, CRPB, SG tables). */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			/* on Gen-I, tags 1..31 alias tag 0: free only once */
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
1162
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.  Returns 0 on success, -ENOMEM if any allocation
 *      fails (partially allocated pool memory is released).
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* Gen-I: every tag shares the tag-0 table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1216
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
1231
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.  Segments are
 *      split so that no ePRD entry crosses a 64KB address boundary;
 *      the length is stored in the low 16 bits of flags_size.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* each NCQ tag has its own SG table */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clip so the entry ends at the next 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1275
/*
 * Pack one ATA register/value pair into a little-endian CRQB command
 * word, setting the last-entry flag when requested.
 */
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
1282
Brett Russ05b308e2005-10-05 17:08:53 -04001283/**
1284 * mv_qc_prep - Host specific command preparation.
1285 * @qc: queued command to prepare
1286 *
1287 * This routine simply redirects to the general purpose routine
1288 * if command is not DMA. Else, it handles prep of the CRQB
1289 * (command request block), does some sanity checking, and calls
1290 * the SG load routine.
1291 *
1292 * LOCKING:
1293 * Inherited from caller.
1294 */
Brett Russ31961942005-09-30 01:36:00 -04001295static void mv_qc_prep(struct ata_queued_cmd *qc)
1296{
1297 struct ata_port *ap = qc->ap;
1298 struct mv_port_priv *pp = ap->private_data;
Mark Lorde1469872006-05-22 19:02:03 -04001299 __le16 *cw;
Brett Russ31961942005-09-30 01:36:00 -04001300 struct ata_taskfile *tf;
1301 u16 flags = 0;
Mark Lorda6432432006-05-19 16:36:36 -04001302 unsigned in_index;
Brett Russ31961942005-09-30 01:36:00 -04001303
Mark Lord138bfdd2008-01-26 18:33:18 -05001304 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1305 (qc->tf.protocol != ATA_PROT_NCQ))
Brett Russ31961942005-09-30 01:36:00 -04001306 return;
Brett Russ20f733e2005-09-01 18:26:17 -04001307
Brett Russ31961942005-09-30 01:36:00 -04001308 /* Fill in command request block
1309 */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001310 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
Brett Russ31961942005-09-30 01:36:00 -04001311 flags |= CRQB_FLAG_READ;
Tejun Heobeec7db2006-02-11 19:11:13 +09001312 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
Brett Russ31961942005-09-30 01:36:00 -04001313 flags |= qc->tag << CRQB_TAG_SHIFT;
Mark Lorde49856d2008-04-16 14:59:07 -04001314 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
Brett Russ31961942005-09-30 01:36:00 -04001315
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001316 /* get current queue index from software */
Mark Lordfcfb1f72008-04-19 15:06:40 -04001317 in_index = pp->req_idx;
Brett Russ31961942005-09-30 01:36:00 -04001318
Mark Lorda6432432006-05-19 16:36:36 -04001319 pp->crqb[in_index].sg_addr =
Mark Lordeb73d552008-01-29 13:24:00 -05001320 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
Mark Lorda6432432006-05-19 16:36:36 -04001321 pp->crqb[in_index].sg_addr_hi =
Mark Lordeb73d552008-01-29 13:24:00 -05001322 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
Mark Lorda6432432006-05-19 16:36:36 -04001323 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1324
1325 cw = &pp->crqb[in_index].ata_cmd[0];
Brett Russ31961942005-09-30 01:36:00 -04001326 tf = &qc->tf;
1327
1328 /* Sadly, the CRQB cannot accomodate all registers--there are
1329 * only 11 bytes...so we must pick and choose required
1330 * registers based on the command. So, we drop feature and
1331 * hob_feature for [RW] DMA commands, but they are needed for
1332 * NCQ. NCQ will drop hob_nsect.
1333 */
1334 switch (tf->command) {
1335 case ATA_CMD_READ:
1336 case ATA_CMD_READ_EXT:
1337 case ATA_CMD_WRITE:
1338 case ATA_CMD_WRITE_EXT:
Jens Axboec15d85c2006-02-15 15:59:25 +01001339 case ATA_CMD_WRITE_FUA_EXT:
Brett Russ31961942005-09-30 01:36:00 -04001340 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1341 break;
Brett Russ31961942005-09-30 01:36:00 -04001342 case ATA_CMD_FPDMA_READ:
1343 case ATA_CMD_FPDMA_WRITE:
Jeff Garzik8b260242005-11-12 12:32:50 -05001344 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
Brett Russ31961942005-09-30 01:36:00 -04001345 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1346 break;
Brett Russ31961942005-09-30 01:36:00 -04001347 default:
1348 /* The only other commands EDMA supports in non-queued and
1349 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1350 * of which are defined/used by Linux. If we get here, this
1351 * driver needs work.
1352 *
1353 * FIXME: modify libata to give qc_prep a return value and
1354 * return error here.
1355 */
1356 BUG_ON(tf->command);
1357 break;
1358 }
1359 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1360 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1361 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1362 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1363 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1364 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1365 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1366 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1367 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1368
Jeff Garzike4e7b892006-01-31 12:18:41 -05001369 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
Brett Russ31961942005-09-30 01:36:00 -04001370 return;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001371 mv_fill_sg(qc);
1372}
1373
/**
 *      mv_qc_prep_iie - Host specific command preparation (Gen IIE).
 *      @qc: queued command to prepare
 *
 *      Gen-IIE counterpart of mv_qc_prep().  Returns early unless the
 *      command uses the DMA or NCQ protocol; otherwise it builds a
 *      Gen-IIE format CRQB (which, unlike Gen-I/II, carries the whole
 *      taskfile in four packed 32-bit words) in the request queue slot
 *      at pp->req_idx, and then loads the S/G table via mv_fill_sg().
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* Only DMA and NCQ protocols go through the EDMA request queue */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	/* split the 64-bit S/G table DMA address into low/high words */
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* Gen-IIE CRQB packs the full taskfile into four 32-bit words */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	/* Internal commands may have no S/G mapping attached */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1442
Brett Russ05b308e2005-10-05 17:08:53 -04001443/**
1444 * mv_qc_issue - Initiate a command to the host
1445 * @qc: queued command to start
1446 *
1447 * This routine simply redirects to the general purpose routine
1448 * if command is not DMA. Else, it sanity checks our local
1449 * caches of the request producer/consumer indices then enables
1450 * DMA and bumps the request producer index.
1451 *
1452 * LOCKING:
1453 * Inherited from caller.
1454 */
Tejun Heo9a3d9eb2006-01-23 13:09:36 +09001455static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001456{
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001457 struct ata_port *ap = qc->ap;
1458 void __iomem *port_mmio = mv_ap_base(ap);
1459 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001460 u32 in_index;
Brett Russ31961942005-09-30 01:36:00 -04001461
Mark Lord138bfdd2008-01-26 18:33:18 -05001462 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1463 (qc->tf.protocol != ATA_PROT_NCQ)) {
Mark Lord17c5aab2008-04-16 14:56:51 -04001464 /*
1465 * We're about to send a non-EDMA capable command to the
Brett Russ31961942005-09-30 01:36:00 -04001466 * port. Turn off EDMA so there won't be problems accessing
1467 * shadow block, etc registers.
1468 */
Mark Lordb5624682008-03-31 19:34:40 -04001469 mv_stop_edma(ap);
Mark Lorde49856d2008-04-16 14:59:07 -04001470 mv_pmp_select(ap, qc->dev->link->pmp);
Tejun Heo9363c382008-04-07 22:47:16 +09001471 return ata_sff_qc_issue(qc);
Brett Russ31961942005-09-30 01:36:00 -04001472 }
1473
Mark Lord72109162008-01-26 18:31:33 -05001474 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001475
Mark Lordfcfb1f72008-04-19 15:06:40 -04001476 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
1477 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
Brett Russ31961942005-09-30 01:36:00 -04001478
1479 /* and write the request in pointer to kick the EDMA to life */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001480 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1481 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
Brett Russ31961942005-09-30 01:36:00 -04001482
1483 return 0;
1484}
1485
Brett Russ05b308e2005-10-05 17:08:53 -04001486/**
Brett Russ05b308e2005-10-05 17:08:53 -04001487 * mv_err_intr - Handle error interrupts on the port
1488 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001489 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001490 *
1491 * In most cases, just clear the interrupt and move on. However,
Mark Lorde12bef52008-03-31 19:33:56 -04001492 * some cases require an eDMA reset, which also performs a COMRESET.
1493 * The SERR case requires a clear of pending errors in the SATA
1494 * SERROR register. Finally, if the port disabled DMA,
1495 * update our cached copy to match.
Brett Russ05b308e2005-10-05 17:08:53 -04001496 *
1497 * LOCKING:
1498 * Inherited from caller.
1499 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001500static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001501{
Brett Russ31961942005-09-30 01:36:00 -04001502 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001503 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1504 struct mv_port_priv *pp = ap->private_data;
1505 struct mv_host_priv *hpriv = ap->host->private_data;
1506 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1507 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001508 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001509
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001510 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001511
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001512 if (!edma_enabled) {
1513 /* just a guess: do we need to do this? should we
1514 * expand this, and do it in all cases?
1515 */
Tejun Heo936fd732007-08-06 18:36:23 +09001516 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1517 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001518 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001519
1520 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1521
Mark Lord352fab72008-04-19 14:43:42 -04001522 ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001523
1524 /*
Mark Lord352fab72008-04-19 14:43:42 -04001525 * All generations share these EDMA error cause bits:
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001526 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001527 if (edma_err_cause & EDMA_ERR_DEV)
1528 err_mask |= AC_ERR_DEV;
1529 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001530 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001531 EDMA_ERR_INTRL_PAR)) {
1532 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001533 action |= ATA_EH_RESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001534 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001535 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001536 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1537 ata_ehi_hotplugged(ehi);
1538 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001539 "dev disconnect" : "dev connect");
Tejun Heocf480622008-01-24 00:05:14 +09001540 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001541 }
1542
Mark Lord352fab72008-04-19 14:43:42 -04001543 /*
1544 * Gen-I has a different SELF_DIS bit,
1545 * different FREEZE bits, and no SERR bit:
1546 */
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001547 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001548 eh_freeze_mask = EDMA_EH_FREEZE_5;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001549 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001550 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001551 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001552 }
1553 } else {
1554 eh_freeze_mask = EDMA_EH_FREEZE;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001555 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001556 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001557 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001558 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001559 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001560 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1561 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001562 err_mask = AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001563 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001564 }
1565 }
Brett Russ20f733e2005-09-01 18:26:17 -04001566
1567 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001568 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001569
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001570 if (!err_mask) {
1571 err_mask = AC_ERR_OTHER;
Tejun Heocf480622008-01-24 00:05:14 +09001572 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001573 }
1574
1575 ehi->serror |= serr;
1576 ehi->action |= action;
1577
1578 if (qc)
1579 qc->err_mask |= err_mask;
1580 else
1581 ehi->err_mask |= err_mask;
1582
1583 if (edma_err_cause & eh_freeze_mask)
1584 ata_port_freeze(ap);
1585 else
1586 ata_port_abort(ap);
1587}
1588
1589static void mv_intr_pio(struct ata_port *ap)
1590{
1591 struct ata_queued_cmd *qc;
1592 u8 ata_status;
1593
1594 /* ignore spurious intr if drive still BUSY */
1595 ata_status = readb(ap->ioaddr.status_addr);
1596 if (unlikely(ata_status & ATA_BUSY))
1597 return;
1598
1599 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001600 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001601 if (unlikely(!qc)) /* no active tag */
1602 return;
1603 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1604 return;
1605
1606 /* and finally, complete the ATA command */
1607 qc->err_mask |= ac_err_mask(ata_status);
1608 ata_qc_complete(qc);
1609}
1610
Mark Lordfcfb1f72008-04-19 15:06:40 -04001611static void mv_process_crpb_response(struct ata_port *ap,
1612 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
1613{
1614 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1615
1616 if (qc) {
1617 u8 ata_status;
1618 u16 edma_status = le16_to_cpu(response->flags);
1619 /*
1620 * edma_status from a response queue entry:
1621 * LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
1622 * MSB is saved ATA status from command completion.
1623 */
1624 if (!ncq_enabled) {
1625 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
1626 if (err_cause) {
1627 /*
1628 * Error will be seen/handled by mv_err_intr().
1629 * So do nothing at all here.
1630 */
1631 return;
1632 }
1633 }
1634 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
1635 qc->err_mask |= ac_err_mask(ata_status);
1636 ata_qc_complete(qc);
1637 } else {
1638 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
1639 __func__, tag);
1640 }
1641}
1642
/*
 * Drain the response (CRPB) queue: walk every entry between our cached
 * consumer index (pp->resp_idx) and the hardware producer index,
 * complete the corresponding commands, then publish the new consumer
 * index back to the hardware.
 */
static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process new responses from since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		/* advance consumer index (modulo queue depth) */
		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		mv_process_crpb_response(ap, response, tag, ncq_enabled);
		work_done = true;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1679
Brett Russ05b308e2005-10-05 17:08:53 -04001680/**
1681 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001682 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001683 * @relevant: port error bits relevant to this host controller
1684 * @hc: which host controller we're to look at
1685 *
1686 * Read then write clear the HC interrupt status then walk each
1687 * port connected to the HC and see if it needs servicing. Port
1688 * success ints are reported in the HC interrupt status reg, the
1689 * port error ints are reported in the higher level main
1690 * interrupt status register and thus are passed in via the
1691 * 'relevant' argument.
1692 *
1693 * LOCKING:
1694 * Inherited from caller.
1695 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001696static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001697{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001698 struct mv_host_priv *hpriv = host->private_data;
1699 void __iomem *mmio = hpriv->base;
Brett Russ20f733e2005-09-01 18:26:17 -04001700 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001701 u32 hc_irq_cause;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001702 int port, port0, last_port;
Brett Russ20f733e2005-09-01 18:26:17 -04001703
Jeff Garzik35177262007-02-24 21:26:42 -05001704 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001705 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001706 else
Brett Russ20f733e2005-09-01 18:26:17 -04001707 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001708
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001709 if (HAS_PCI(host))
1710 last_port = port0 + MV_PORTS_PER_HC;
1711 else
1712 last_port = port0 + hpriv->n_ports;
Brett Russ20f733e2005-09-01 18:26:17 -04001713 /* we'll need the HC success int register in most cases */
1714 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001715 if (!hc_irq_cause)
1716 return;
1717
1718 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001719
1720 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001721 hc, relevant, hc_irq_cause);
Brett Russ20f733e2005-09-01 18:26:17 -04001722
Yinghai Lu8f71efe2008-02-07 15:06:17 -08001723 for (port = port0; port < last_port; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001724 struct ata_port *ap = host->ports[port];
Yinghai Lu8f71efe2008-02-07 15:06:17 -08001725 struct mv_port_priv *pp;
Mark Lord352fab72008-04-19 14:43:42 -04001726 int have_err_bits, hardport, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001727
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001728 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001729 continue;
1730
Yinghai Lu8f71efe2008-02-07 15:06:17 -08001731 pp = ap->private_data;
1732
Brett Russ31961942005-09-30 01:36:00 -04001733 shift = port << 1; /* (port * 2) */
Mark Lorde12bef52008-03-31 19:33:56 -04001734 if (port >= MV_PORTS_PER_HC)
Brett Russ20f733e2005-09-01 18:26:17 -04001735 shift++; /* skip bit 8 in the HC Main IRQ reg */
Mark Lorde12bef52008-03-31 19:33:56 -04001736
Mark Lord352fab72008-04-19 14:43:42 -04001737 have_err_bits = ((ERR_IRQ << shift) & relevant);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001738
1739 if (unlikely(have_err_bits)) {
1740 struct ata_queued_cmd *qc;
1741
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001742 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001743 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1744 continue;
1745
1746 mv_err_intr(ap, qc);
1747 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001748 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001749
Mark Lord352fab72008-04-19 14:43:42 -04001750 hardport = mv_hardport_from_port(port); /* range 0..3 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001751
1752 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
Mark Lord352fab72008-04-19 14:43:42 -04001753 if ((DMA_IRQ << hardport) & hc_irq_cause)
Mark Lordfcfb1f72008-04-19 15:06:40 -04001754 mv_process_crpb_entries(ap, pp);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001755 } else {
Mark Lord352fab72008-04-19 14:43:42 -04001756 if ((DEV_IRQ << hardport) & hc_irq_cause)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001757 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001758 }
1759 }
1760 VPRINTK("EXIT\n");
1761}
1762
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001763static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1764{
Mark Lord02a121d2007-12-01 13:07:22 -05001765 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001766 struct ata_port *ap;
1767 struct ata_queued_cmd *qc;
1768 struct ata_eh_info *ehi;
1769 unsigned int i, err_mask, printed = 0;
1770 u32 err_cause;
1771
Mark Lord02a121d2007-12-01 13:07:22 -05001772 err_cause = readl(mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001773
1774 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1775 err_cause);
1776
1777 DPRINTK("All regs @ PCI error\n");
1778 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1779
Mark Lord02a121d2007-12-01 13:07:22 -05001780 writelfl(0, mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001781
1782 for (i = 0; i < host->n_ports; i++) {
1783 ap = host->ports[i];
Tejun Heo936fd732007-08-06 18:36:23 +09001784 if (!ata_link_offline(&ap->link)) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001785 ehi = &ap->link.eh_info;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001786 ata_ehi_clear_desc(ehi);
1787 if (!printed++)
1788 ata_ehi_push_desc(ehi,
1789 "PCI err cause 0x%08x", err_cause);
1790 err_mask = AC_ERR_HOST_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001791 ehi->action = ATA_EH_RESET;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001792 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001793 if (qc)
1794 qc->err_mask |= err_mask;
1795 else
1796 ehi->err_mask |= err_mask;
1797
1798 ata_port_freeze(ap);
1799 }
1800 }
1801}
1802
Brett Russ05b308e2005-10-05 17:08:53 -04001803/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001804 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001805 * @irq: unused
1806 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001807 *
1808 * Read the read only register to determine if any host
1809 * controllers have pending interrupts. If so, call lower level
1810 * routine to handle. Also check for PCI errors which are only
1811 * reported here.
1812 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001813 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001814 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001815 * interrupts.
1816 */
David Howells7d12e782006-10-05 14:55:46 +01001817static irqreturn_t mv_interrupt(int irq, void *dev_instance)
Brett Russ20f733e2005-09-01 18:26:17 -04001818{
Jeff Garzikcca39742006-08-24 03:19:22 -04001819 struct ata_host *host = dev_instance;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001820 struct mv_host_priv *hpriv = host->private_data;
Brett Russ20f733e2005-09-01 18:26:17 -04001821 unsigned int hc, handled = 0, n_hcs;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001822 void __iomem *mmio = hpriv->base;
Mark Lord352fab72008-04-19 14:43:42 -04001823 u32 main_cause, main_mask;
Brett Russ20f733e2005-09-01 18:26:17 -04001824
Mark Lord646a4da2008-01-26 18:30:37 -05001825 spin_lock(&host->lock);
Mark Lord352fab72008-04-19 14:43:42 -04001826 main_cause = readl(hpriv->main_cause_reg_addr);
1827 main_mask = readl(hpriv->main_mask_reg_addr);
1828 /*
1829 * Deal with cases where we either have nothing pending, or have read
1830 * a bogus register value which can indicate HW removal or PCI fault.
Brett Russ20f733e2005-09-01 18:26:17 -04001831 */
Mark Lord352fab72008-04-19 14:43:42 -04001832 if (!(main_cause & main_mask) || (main_cause == 0xffffffffU))
Mark Lord646a4da2008-01-26 18:30:37 -05001833 goto out_unlock;
Brett Russ20f733e2005-09-01 18:26:17 -04001834
Jeff Garzikcca39742006-08-24 03:19:22 -04001835 n_hcs = mv_get_hc_count(host->ports[0]->flags);
Brett Russ20f733e2005-09-01 18:26:17 -04001836
Mark Lord352fab72008-04-19 14:43:42 -04001837 if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host))) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001838 mv_pci_error(host, mmio);
1839 handled = 1;
1840 goto out_unlock; /* skip all other HC irq handling */
1841 }
1842
Brett Russ20f733e2005-09-01 18:26:17 -04001843 for (hc = 0; hc < n_hcs; hc++) {
Mark Lord352fab72008-04-19 14:43:42 -04001844 u32 relevant = main_cause & (HC0_IRQ_PEND << (hc * HC_SHIFT));
Brett Russ20f733e2005-09-01 18:26:17 -04001845 if (relevant) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001846 mv_host_intr(host, relevant, hc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001847 handled = 1;
Brett Russ20f733e2005-09-01 18:26:17 -04001848 }
1849 }
Mark Lord615ab952006-05-19 16:24:56 -04001850
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001851out_unlock:
Jeff Garzikcca39742006-08-24 03:19:22 -04001852 spin_unlock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001853 return IRQ_RETVAL(handled);
1854}
1855
Jeff Garzikc9d39132005-11-13 17:47:51 -05001856static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1857{
1858 unsigned int ofs;
1859
1860 switch (sc_reg_in) {
1861 case SCR_STATUS:
1862 case SCR_ERROR:
1863 case SCR_CONTROL:
1864 ofs = sc_reg_in * sizeof(u32);
1865 break;
1866 default:
1867 ofs = 0xffffffffU;
1868 break;
1869 }
1870 return ofs;
1871}
1872
Tejun Heoda3dbb12007-07-16 14:29:40 +09001873static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001874{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001875 struct mv_host_priv *hpriv = ap->host->private_data;
1876 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001877 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001878 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1879
Tejun Heoda3dbb12007-07-16 14:29:40 +09001880 if (ofs != 0xffffffffU) {
1881 *val = readl(addr + ofs);
1882 return 0;
1883 } else
1884 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001885}
1886
Tejun Heoda3dbb12007-07-16 14:29:40 +09001887static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001888{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001889 struct mv_host_priv *hpriv = ap->host->private_data;
1890 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001891 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001892 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1893
Tejun Heoda3dbb12007-07-16 14:29:40 +09001894 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001895 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001896 return 0;
1897 } else
1898 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001899}
1900
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001901static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik522479f2005-11-12 22:14:02 -05001902{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001903 struct pci_dev *pdev = to_pci_dev(host->dev);
Jeff Garzik522479f2005-11-12 22:14:02 -05001904 int early_5080;
1905
Auke Kok44c10132007-06-08 15:46:36 -07001906 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001907
1908 if (!early_5080) {
1909 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1910 tmp |= (1 << 0);
1911 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1912 }
1913
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001914 mv_reset_pci_bus(host, mmio);
Jeff Garzik522479f2005-11-12 22:14:02 -05001915}
1916
/*
 * Gen-I: write the flash control register to its quiescent value.
 * NOTE(review): 0x0fcfffff is a magic value carried over from the
 * vendor driver -- its bit meanings are not documented here.
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1921
Jeff Garzik47c2b672005-11-12 21:13:17 -05001922static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001923 void __iomem *mmio)
1924{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001925 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1926 u32 tmp;
1927
1928 tmp = readl(phy_mmio + MV5_PHY_MODE);
1929
1930 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1931 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001932}
1933
Jeff Garzik47c2b672005-11-12 21:13:17 -05001934static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001935{
Jeff Garzik522479f2005-11-12 22:14:02 -05001936 u32 tmp;
1937
1938 writel(0, mmio + MV_GPIO_PORT_CTL);
1939
1940 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1941
1942 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1943 tmp |= ~(1 << 0);
1944 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001945}
1946
Jeff Garzik2a47ce02005-11-12 23:05:14 -05001947static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1948 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001949{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001950 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1951 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1952 u32 tmp;
1953 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1954
1955 if (fix_apm_sq) {
1956 tmp = readl(phy_mmio + MV5_LT_MODE);
1957 tmp |= (1 << 19);
1958 writel(tmp, phy_mmio + MV5_LT_MODE);
1959
1960 tmp = readl(phy_mmio + MV5_PHY_CTL);
1961 tmp &= ~0x3;
1962 tmp |= 0x1;
1963 writel(tmp, phy_mmio + MV5_PHY_CTL);
1964 }
1965
1966 tmp = readl(phy_mmio + MV5_PHY_MODE);
1967 tmp &= ~mask;
1968 tmp |= hpriv->signal[port].pre;
1969 tmp |= hpriv->signal[port].amps;
1970 writel(tmp, phy_mmio + MV5_PHY_MODE);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001971}
1972
Jeff Garzikc9d39132005-11-13 17:47:51 -05001973
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * Reset a single 50xx port: quiesce EDMA and strobe ATA_RST via
 * mv_reset_channel(), then zero / re-initialize the per-port EDMA
 * registers (command, timer, irq cause/mask, request & response
 * queue pointers, test control) and program default config values.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be). So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
2003
#define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * Reset one 50xx host-controller block: zero a group of HC registers
 * and rewrite the register at offset 0x20 with a fixed bit pattern.
 * The raw offsets (0x00c..0x018, 0x20) are not named in this file --
 * presumably HC irq/config registers per the 50xx datasheet (TODO:
 * confirm before relying on specific semantics).
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* keep bits in 0x1c1c1c1c, force bits 0x03030303 on */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
2022
2023static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2024 unsigned int n_hc)
2025{
2026 unsigned int hc, port;
2027
2028 for (hc = 0; hc < n_hc; hc++) {
2029 for (port = 0; port < MV_PORTS_PER_HC; port++)
2030 mv5_reset_hc_port(hpriv, mmio,
2031 (hc * MV_PORTS_PER_HC) + port);
2032
2033 mv5_reset_one_hc(hpriv, mmio, hc);
2034 }
2035
2036 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002037}
2038
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/*
 * Reset PCI-side state: clear the upper-middle byte of MV_PCI_MODE,
 * zero the disconnect-timer/MSI-trigger/irq/error registers, mask all
 * main-cause interrupts, and program the crossbar timeout.
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;	/* clear bits 23:16 */
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	/* irq cause/mask offsets differ between PCI and PCIe variants */
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
2063
/*
 * 60xx flash reset: do the common 50xx flash reset, then adjust the
 * GPIO port control register (keep only bits 1:0, set bits 5 and 6).
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2075
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      Flush the PCI master, pulse GLOB_SFT_RST on and then off again,
 *      re-enabling the PCI master along the way.  Returns 0 on success,
 *      1 if any step times out.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* poll up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset (up to 6 attempts, 1us apart) */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
	/*
	 * Temporary: wait 3 seconds before port-probing can happen,
	 * so that we don't miss finding sleepy SilXXXX port-multipliers.
	 * This can go away once hotplug is fully/correctly implemented.
	 */
	if (rc == 0)
		msleep(3000);
done:
	return rc;
}
2146
Jeff Garzik47c2b672005-11-12 21:13:17 -05002147static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002148 void __iomem *mmio)
2149{
2150 void __iomem *port_mmio;
2151 u32 tmp;
2152
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002153 tmp = readl(mmio + MV_RESET_CFG);
2154 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002155 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002156 hpriv->signal[idx].pre = 0x1 << 5;
2157 return;
2158 }
2159
2160 port_mmio = mv_port_base(mmio, idx);
2161 tmp = readl(port_mmio + PHY_MODE2);
2162
2163 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2164 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2165}
2166
/*
 * Enable LED support on 60xx chips: a single fixed write to the GPIO
 * port control register (value per chip spec -- TODO confirm bits).
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2171
/*
 * Apply 60xx/IIE PHY errata workarounds to @port and then restore the
 * pre-emphasis/amplitude values saved earlier by the read_preamp hook.
 * Register write ordering and the udelay() settling times are part of
 * the workaround sequence -- do not reorder.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then clear both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* on 60X1B2, PHY_MODE3 must be saved/restored around
		 * the PHY_MODE4 update (tmp does double duty here)
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2238
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002239/* TODO: use the generic LED interface to configure the SATA Presence */
2240/* & Acitivy LEDs on the board */
2241static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2242 void __iomem *mmio)
2243{
2244 return;
2245}
2246
2247static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2248 void __iomem *mmio)
2249{
2250 void __iomem *port_mmio;
2251 u32 tmp;
2252
2253 port_mmio = mv_port_base(mmio, idx);
2254 tmp = readl(port_mmio + PHY_MODE2);
2255
2256 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2257 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2258}
2259
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * Reset a single SoC port: quiesce EDMA and strobe ATA_RST via
 * mv_reset_channel(), then zero / re-initialize the per-port EDMA
 * registers.  Mirrors mv5_reset_hc_port() except for the EDMA_CFG
 * value (0x101f here vs 0x11f on 50xx).
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				      void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be). So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO
2290
#define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * Reset the (single) SoC host-controller block by zeroing three HC
 * registers.  Raw offsets are unnamed here -- presumably HC irq
 * registers per the SoC datasheet (TODO: confirm).  Note: unlike
 * mv5_reset_one_hc(), offset 0x018 and the 0x20 RMW are not touched.
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				      void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}

#undef ZERO
2304
2305static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2306 void __iomem *mmio, unsigned int n_hc)
2307{
2308 unsigned int port;
2309
2310 for (port = 0; port < hpriv->n_ports; port++)
2311 mv_soc_reset_hc_port(hpriv, mmio, port);
2312
2313 mv_soc_reset_one_hc(hpriv, mmio);
2314
2315 return 0;
2316}
2317
2318static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2319 void __iomem *mmio)
2320{
2321 return;
2322}
2323
2324static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2325{
2326 return;
2327}
2328
/*
 * Program a port's SATA_INTERFACE_CFG register.
 * @want_gen2i: nonzero to enable gen2i (3.0 Gb/s) link speed via bit 7;
 *              zero leaves the port limited to 1.5 Gb/s.
 */
static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);

	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifctl |= (1 << 7);	/* enable gen2i speed */
	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
}
2338
/*
 * Hard-reset one SATA channel: disable the EDMA engine, optionally
 * re-enable gen2i speed (non-GEN_I chips), strobe ATA_RST, and run
 * the per-chip PHY errata hook.
 *
 * Caller must ensure that EDMA is not active,
 * by first doing mv_stop_edma() where needed.
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	mv_stop_edma_engine(port_mmio);
	/* first ATA_RST assertion; held while the interface is reconfigured */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed */
		mv_setup_ifctl(port_mmio, 1);
	}
	/*
	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
	 * link, and physical layers. It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2369
Mark Lorde49856d2008-04-16 14:59:07 -04002370static void mv_pmp_select(struct ata_port *ap, int pmp)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002371{
Mark Lorde49856d2008-04-16 14:59:07 -04002372 if (sata_pmp_supported(ap)) {
2373 void __iomem *port_mmio = mv_ap_base(ap);
2374 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2375 int old = reg & 0xf;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002376
Mark Lorde49856d2008-04-16 14:59:07 -04002377 if (old != pmp) {
2378 reg = (reg & ~0xf) | pmp;
2379 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
2380 }
Tejun Heoda3dbb12007-07-16 14:29:40 +09002381 }
Brett Russ20f733e2005-09-01 18:26:17 -04002382}
2383
/*
 * Hardreset a link behind a port multiplier: select the target PMP
 * device on the controller first, then do a standard SATA hardreset.
 */
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002390
/*
 * Softreset a link (possibly behind a port multiplier): select the
 * target PMP device, then do the generic SFF software reset.
 */
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}
2397
/*
 * Controller-level hardreset: reset the whole channel (which disables
 * EDMA), then retry sata_link_hardreset() until SStatus settles.
 *
 * The retry loop implements part 2 of the FEr SATA#10 errata
 * workaround: if after 5 attempts SStatus reads 0x121 (device detected
 * but no phy communication -- standard SCR0 encoding, TODO confirm for
 * this chip), drop the interface to 1.5 Gb/s and keep trying.  Loop
 * exits on 0x0 (no device) or 0x113/0x123 (link up at gen1/gen2).
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;	/* channel reset killed EDMA */

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifctl(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}
2432
/*
 * libata ->freeze() hook: stop EDMA on the port and mask this port's
 * "done" and "error" bits in the chip's main interrupt mask register,
 * so the port generates no further interrupts while frozen.
 */
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int shift, hardport, port = ap->port_no;
	u32 main_mask;

	/* FIXME: handle coalescing completion events properly */

	mv_stop_edma(ap);
	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	/* disable assertion of portN err, done events */
	main_mask = readl(hpriv->main_mask_reg_addr);
	main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}
2449
/*
 * libata ->thaw() hook: clear any EDMA error and pending HC interrupt
 * state for this port, then re-enable its "done"/"error" bits in the
 * main interrupt mask register (undoing mv_eh_freeze()).
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int shift, hardport, port = ap->port_no;
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 main_mask, hc_irq_cause;

	/* FIXME: handle coalescing completion events properly */

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	main_mask = readl(hpriv->main_mask_reg_addr);
	main_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}
2475
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the taskfile shadow registers live in a
	 * block of u32 slots, one per ATA_REG_* index
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions
	 * (writing SError's current value back clears it)
	 */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2524
/*
 * Identify the chip variant for @board_idx: select the per-generation
 * hw ops vector, set generation and errata flags from the PCI revision,
 * and choose PCI vs PCIe interrupt cause/mask register offsets.
 * Returns 0 on success, 1 on an invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata. This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through: 7042 shares the 6042 ops/errata handling */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2664
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      Sequence: identify the chip, select PCI vs SoC main irq register
 *      addresses, mask all interrupts, sample PHY preamp values, reset
 *      the HCs / flash / bus, enable LEDs, initialize each port, clear
 *      stale HC/host interrupt causes, and finally unmask interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = mmio + HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = mmio + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = mmio + HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = mmio + HC_SOC_MAIN_IRQ_MASK_OFS;
	}

	/* global interrupt mask: 0 == mask everything */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
2766
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002767static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2768{
2769 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2770 MV_CRQB_Q_SZ, 0);
2771 if (!hpriv->crqb_pool)
2772 return -ENOMEM;
2773
2774 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2775 MV_CRPB_Q_SZ, 0);
2776 if (!hpriv->crpb_pool)
2777 return -ENOMEM;
2778
2779 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2780 MV_SG_TBL_SZ, 0);
2781 if (!hpriv->sg_tbl_pool)
2782 return -ENOMEM;
2783
2784 return 0;
2785}
2786
Lennert Buytenhek15a32632008-03-27 14:51:39 -04002787static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
2788 struct mbus_dram_target_info *dram)
2789{
2790 int i;
2791
2792 for (i = 0; i < 4; i++) {
2793 writel(0, hpriv->base + WINDOW_CTRL(i));
2794 writel(0, hpriv->base + WINDOW_BASE(i));
2795 }
2796
2797 for (i = 0; i < dram->num_cs; i++) {
2798 struct mbus_dram_window *cs = dram->cs + i;
2799
2800 writel(((cs->size - 1) & 0xffff0000) |
2801 (cs->mbus_attr << 8) |
2802 (dram->mbus_dram_target_id << 4) | 1,
2803 hpriv->base + WINDOW_CTRL(i));
2804 writel(cs->base, hpriv->base + WINDOW_BASE(i));
2805 }
2806}
2807
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002808/**
2809 * mv_platform_probe - handle a positive probe of an soc Marvell
2810 * host
2811 * @pdev: platform device found
2812 *
2813 * LOCKING:
2814 * Inherited from caller.
2815 */
2816static int mv_platform_probe(struct platform_device *pdev)
2817{
2818 static int printed_version;
2819 const struct mv_sata_platform_data *mv_platform_data;
2820 const struct ata_port_info *ppi[] =
2821 { &mv_port_info[chip_soc], NULL };
2822 struct ata_host *host;
2823 struct mv_host_priv *hpriv;
2824 struct resource *res;
2825 int n_ports, rc;
2826
2827 if (!printed_version++)
2828 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2829
2830 /*
2831 * Simple resource validation ..
2832 */
2833 if (unlikely(pdev->num_resources != 2)) {
2834 dev_err(&pdev->dev, "invalid number of resources\n");
2835 return -EINVAL;
2836 }
2837
2838 /*
2839 * Get the register base first
2840 */
2841 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2842 if (res == NULL)
2843 return -EINVAL;
2844
2845 /* allocate host */
2846 mv_platform_data = pdev->dev.platform_data;
2847 n_ports = mv_platform_data->n_ports;
2848
2849 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2850 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2851
2852 if (!host || !hpriv)
2853 return -ENOMEM;
2854 host->private_data = hpriv;
2855 hpriv->n_ports = n_ports;
2856
2857 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002858 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2859 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002860 hpriv->base -= MV_SATAHC0_REG_BASE;
2861
Lennert Buytenhek15a32632008-03-27 14:51:39 -04002862 /*
2863 * (Re-)program MBUS remapping windows if we are asked to.
2864 */
2865 if (mv_platform_data->dram != NULL)
2866 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
2867
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002868 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2869 if (rc)
2870 return rc;
2871
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002872 /* initialize adapter */
2873 rc = mv_init_host(host, chip_soc);
2874 if (rc)
2875 return rc;
2876
2877 dev_printk(KERN_INFO, &pdev->dev,
2878 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2879 host->n_ports);
2880
2881 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2882 IRQF_SHARED, &mv6_sht);
2883}
2884
2885/*
2886 *
2887 * mv_platform_remove - unplug a platform interface
2888 * @pdev: platform device
2889 *
2890 * A platform bus SATA device has been unplugged. Perform the needed
2891 * cleanup. Also called on module unload for any active devices.
2892 */
2893static int __devexit mv_platform_remove(struct platform_device *pdev)
2894{
2895 struct device *dev = &pdev->dev;
2896 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002897
2898 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002899 return 0;
2900}
2901
/* Platform-bus glue for chip-integrated (SoC) SATA controllers. */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
2910
2911
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002912#ifdef CONFIG_PCI
/* Forward declaration: defined further below, referenced by
 * mv_pci_driver's .probe hook.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);
2915
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002916
/* PCI-bus glue for discrete Marvell SATA controllers. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};
2923
/*
 * module options
 */
/* Exposed via module_param() near the bottom of this file (PCI only). */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2928
2929
2930/* move to PCI layer or libata core? */
2931static int pci_go_64(struct pci_dev *pdev)
2932{
2933 int rc;
2934
2935 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2936 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2937 if (rc) {
2938 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2939 if (rc) {
2940 dev_printk(KERN_ERR, &pdev->dev,
2941 "64-bit DMA enable failed\n");
2942 return rc;
2943 }
2944 }
2945 } else {
2946 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2947 if (rc) {
2948 dev_printk(KERN_ERR, &pdev->dev,
2949 "32-bit DMA enable failed\n");
2950 return rc;
2951 }
2952 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2953 if (rc) {
2954 dev_printk(KERN_ERR, &pdev->dev,
2955 "32-bit consistent DMA enable failed\n");
2956 return rc;
2957 }
2958 }
2959
2960 return rc;
2961}
2962
Brett Russ05b308e2005-10-05 17:08:53 -04002963/**
2964 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002965 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002966 *
2967 * FIXME: complete this.
2968 *
2969 * LOCKING:
2970 * Inherited from caller.
2971 */
Tejun Heo4447d352007-04-17 23:44:08 +09002972static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002973{
Tejun Heo4447d352007-04-17 23:44:08 +09002974 struct pci_dev *pdev = to_pci_dev(host->dev);
2975 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002976 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002977 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002978
2979 /* Use this to determine the HW stepping of the chip so we know
2980 * what errata to workaround
2981 */
Brett Russ31961942005-09-30 01:36:00 -04002982 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2983 if (scc == 0)
2984 scc_s = "SCSI";
2985 else if (scc == 0x01)
2986 scc_s = "RAID";
2987 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002988 scc_s = "?";
2989
2990 if (IS_GEN_I(hpriv))
2991 gen = "I";
2992 else if (IS_GEN_II(hpriv))
2993 gen = "II";
2994 else if (IS_GEN_IIE(hpriv))
2995 gen = "IIE";
2996 else
2997 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002998
Jeff Garzika9524a72005-10-30 14:39:11 -05002999 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003000 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3001 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04003002 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3003}
3004
/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Allocates the ATA host, acquires PCI resources (managed via pcim_*,
 * so nothing needs explicit teardown on the error paths), configures
 * DMA masks and pools, initializes the adapter and activates the host.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 0 on success, negative errno on failure.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	/* board_idx selects the mv_port_info entry for this chip family */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Map only the primary BAR; pin the device if it is busy so a
	 * managed release cannot yank it from under another owner.
	 */
	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	/* NOTE(review): pci_intx(pdev, 1) is only reached when MSI was
	 * requested but failed; MV_HP_FLAG_MSI is never set here, so
	 * mv_print_info() always reports "INTx" — confirm intent.
	 */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	/* Gen-I chips use the mv5 sht; everything newer uses mv6. */
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003074#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003075
/* NOTE(review): these prototypes follow the definitions above and
 * appear redundant — confirm before removing.
 */
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
3078
/*
 * mv_init - module entry point
 *
 * Registers the PCI driver (when CONFIG_PCI) and the platform driver.
 * Registration is all-or-nothing: a platform registration failure
 * also unwinds the PCI registration.
 */
static int __init mv_init(void)
{
	/* -ENODEV is the result when neither bus driver registers */
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	/* unwind the PCI registration if the platform one failed */
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}
3095
/*
 * mv_exit - module exit point
 *
 * Unregisters both bus drivers; mirrors mv_init() in reverse order.
 */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3103
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
/* msi= is only meaningful for PCI hosts; SoC hosts ignore it */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);