/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include <asm/io.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.25"

enum {
	/* BARs are enumerated in terms of pci_resource_start() */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_SATAHC0_REG_BASE	= 0x20000,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	/* Our DMA boundary is determined by an ePRD being unable to handle
	 * anything larger than 64KB
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_FLAG_GLBL_SFT_RST	= (1 << 28),  /* Global Soft Reset support */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
	MV_6XXX_FLAGS		= (MV_FLAG_IRQ_COALESCE |
				   MV_FLAG_GLBL_SFT_RST),

	chip_504x		= 0,
	chip_508x		= 1,
	chip_604x		= 2,
	chip_608x		= 3,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	u32			sg_addr;
	u32			sg_addr_hi;
	u16			ctrl_flags;
	u16			ata_cmd[11];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	u16			id;
	u16			flags;
	u32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	u32			addr;
	u32			flags_size;
	u32			addr_hi;
	u32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned		req_producer;	/* cp of req_in_ptr */
	unsigned		rsp_consumer;	/* cp of rsp_out_ptr */
	u32			pp_flags;
};

struct mv_host_priv {
	u32			hp_flags;
};

static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void mv_host_stop(struct ata_host_set *host_set);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_strategy_handler	= ata_scsi_error,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.bios_param		= ata_std_bios_param,
	.ordered_flush		= 1,
};

static const struct ata_port_operations mv_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};

static struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.host_flags	= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0,	/* 0x7f (udma0-6 disabled for now) */
		.port_ops	= &mv_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0,	/* 0x7f (udma0-6 disabled for now) */
		.port_ops	= &mv_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_508x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},

	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},

	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x},
	{}			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

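/* Register address helpers: each SATA host controller (HC) occupies a
 * 64KB window starting at MV_SATAHC0_REG_BASE, and each of its four
 * ports an 8KB window following the HC arbiter registers.
 */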
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return (mv_hc_base(base, port >> MV_PORT_HC_SHIFT) +
		MV_SATAHC_ARBTR_REG_SZ +
		((port & MV_PORT_MASK) * MV_PORT_REG_SZ));
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host_set->mmio_base, ap->port_no);
}

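/* 8-port (dual-HC) parts have two SATA host controllers, 4-port parts one. */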
static inline int mv_get_hc_count(unsigned long hp_flags)
{
	return ((hp_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
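	/* Intentionally empty: interrupt causes are cleared in
	 * mv_host_intr() and mv_err_intr().
	 */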
}

/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with an
 * assert.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
}

/**
 * mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with an
 * assert.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg)) {
			break;
		}
		udelay(100);
	}

	if (EDMA_EN & reg) {
		printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
		/* FIXME: Consider doing a reset here to recover */
	}
}

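/* Debug-only helpers: dump an MMIO region, PCI config space, and the
 * interesting chip registers for a port (or all ports when port < 0).
 */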
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, port >> MV_PORT_HC_SHIFT);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

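/* Translate a libata SCR register number into an offset within the port's
 * SATA register block; returns 0xffffffffU for unsupported registers.
 */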
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		return readl(mv_ap_base(ap) + ofs);
	} else {
		return (u32) ofs;
	}
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		writelfl(val, mv_ap_base(ap) + ofs);
	}
}

/**
 * mv_global_soft_reset - Perform the 6xxx global soft reset
 * @mmio_base: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_global_soft_reset(void __iomem *mmio_base)
{
	void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *re-enable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

/**
 * mv_host_stop - Host specific cleanup/stop routine.
 * @host_set: host data structure
 *
 * Disable ints, cleanup host memory, call general purpose
 * host_stop.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_stop(struct ata_host_set *host_set)
{
	struct mv_host_priv *hpriv = host_set->private_data;
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
	kfree(hpriv);
	ata_host_stop(host_set);
}

static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
{
	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc = -ENOMEM;

	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp)
		goto err_out;
	memset(pp, 0, sizeof(*pp));

	mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				 GFP_KERNEL);
	if (!mem)
		goto err_out_pp;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		goto err_out_priv;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT |
		 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);

	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	pp->req_producer = pp->rsp_consumer = 0;

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;

err_out_priv:
	mv_priv_free(pp, dev);
err_out_pp:
	kfree(pp);
err_out:
	return rc;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host_set lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct mv_port_priv *pp = ap->private_data;
	unsigned long flags;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	ap->private_data = NULL;
	ata_pad_free(ap, dev);
	mv_priv_free(pp, dev);
	kfree(pp);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int i = 0;
	struct scatterlist *sg;

	ata_for_each_sg(sg, qc) {
		u32 sg_len;
		dma_addr_t addr;

		addr = sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
		pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		assert(0 == (sg_len & ~MV_DMA_BOUNDARY));
		pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len);
		if (ata_sg_is_last(sg, qc))
			pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

		i++;
	}
}

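/* Advance a request/response queue index, wrapping at MV_MAX_Q_DEPTH. */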
static inline unsigned mv_inc_q_index(unsigned *index)
{
	*index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
	return *index;
}

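/* Pack one shadow register write (value + register address) into a CRQB
 * ATA command word, optionally flagging it as the last word of the command.
 */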
static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
{
	*cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
}

/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	u16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		return;
	}

	/* the req producer index should be the same as we remember it */
	assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->req_producer);

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
		flags |= CRQB_FLAG_READ;
	}
	assert(MV_MAX_Q_DEPTH > qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	pp->crqb[pp->req_producer].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[pp->req_producer].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[pp->req_producer].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP)) {
		return;
	}
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* the req producer index should be the same as we remember it */
	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->req_producer);
	/* until we do queuing, the queue should be empty at this point */
	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	mv_inc_q_index(&pp->req_producer);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 * mv_get_crpb_status - get status from most recently completed cmd
 * @ap: ATA channel to manipulate
 *
 * This routine is for use when the port is in DMA mode, when it
 * will be using the CRPB (command response block) method of
 * returning command completion information.  We assert indices
 * are good, grab status, and bump the response consumer index to
 * prove that we're up to date.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 out_ptr;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* the response consumer index should be the same as we remember it */
	assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->rsp_consumer);

	/* increment our consumer index... */
	pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
		 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->rsp_consumer);

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT);
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		serr = scr_read(ap, SCR_ERROR);
		scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp = ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (EDMA_ERR_FATAL & edma_err_cause) {
		mv_phy_reset(ap);
	}
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host_set: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
			 unsigned int hc)
{
	void __iomem *mmio = host_set->mmio_base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;
	u8 ata_status = 0;

	if (hc == 0) {
		port0 = 0;
	} else {
		port0 = MV_PORTS_PER_HC;
	}

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause) {
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		ap = host_set->ports[port];
		hard_port = port & MV_PORT_MASK;	/* range 0-3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
			/* new CRPB on the queue; just one at a time until NCQ
			 */
			ata_status = mv_get_crpb_status(ap);
			handled++;
		} else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
			/* received ATA IRQ; read the status reg to clear INTRQ
			 */
			ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr);
			handled++;
		}

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap);
			err_mask |= AC_ERR_OTHER;
			handled++;
		}

		if (handled && ap) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (NULL != qc) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port, ata_status);
				/* mark qc status appropriately */
				ata_qc_complete(qc, err_mask);
			}
		}
	}
	VPRINTK("EXIT\n");
}

/**
 * mv_interrupt -
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 * @regs: unused
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host_set lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host_set->mmio_base;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat)) {
		return IRQ_NONE;
	}

	n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
	spin_lock(&host_set->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host_set, relevant, hc);
			handled++;
		}
	}
	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host_set->lock);

	return IRQ_RETVAL(handled);
}

/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller.  This is coded to be safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	mv_stop_dma(ap);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	VPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* proceed to init communications via the scr_control reg */
	scr_write_flush(ap, SCR_CONTROL, 0x301);
	mdelay(1);
	scr_write_flush(ap, SCR_CONTROL, 0x300);
	timeout = jiffies + (HZ * 1);
	do {
		mdelay(10);
		if ((scr_read(ap, SCR_STATUS) & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	VPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (sata_dev_present(ap)) {
		ata_port_probe(ap);
	} else {
		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
		       ap->id, scr_read(ap, SCR_STATUS));
		ata_port_disable(ap);
		return;
	}
	ap->cbl = ATA_CBL_SATA;

	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_present(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}
	VPRINTK("EXIT\n");
}

/**
 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
 * @ap: ATA channel to manipulate
 *
 * Intent is to clear all pending error conditions, reset the
 * chip/bus, fail the command, and move on.
 *
 * LOCKING:
 * This routine holds the host_set lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;

	printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n", ap->id);
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
			 to_pci_dev(ap->host_set->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
	       ap->host_set->mmio_base, ap, qc, qc->scsicmd,
	       &qc->scsicmd->cmnd);

	mv_err_intr(ap);
	mv_phy_reset(ap);

	if (!qc) {
		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
		       ap->id);
	} else {
		/* hack alert!  We cannot use the supplied completion
		 * function from inside the ->eh_strategy_handler() thread.
		 * libata is the only user of ->eh_strategy_handler() in
		 * any kernel, so the default scsi_done() assumes it is
		 * not being called from the SCSI EH.
		 */
		spin_lock_irqsave(&ap->host_set->lock, flags);
		qc->scsidone = scsi_finish_command;
		ata_qc_complete(qc, AC_ERR_OTHER);
		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	}
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

/**
 * mv_host_init - Perform some early initialization of the host.
 * @probe_ent: early data struct representing the host
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_host_init(struct ata_probe_ent *probe_ent)
{
	int rc = 0, n_hc, port, hc;
	void __iomem *mmio = probe_ent->mmio_base;
	void __iomem *port_mmio;

	if ((MV_FLAG_GLBL_SFT_RST & probe_ent->host_flags) &&
	    mv_global_soft_reset(probe_ent->mmio_base)) {
		rc = 1;
		goto done;
	}

	n_hc = mv_get_hc_count(probe_ent->host_flags);
	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;

	for (port = 0; port < probe_ent->n_ports; port++) {
		port_mmio = mv_port_base(mmio, port);
		mv_port_init(&probe_ent->port[port], port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
	writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));
done:
	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @probe_ent: early data struct representing the host
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	struct mv_host_priv *hpriv = probe_ent->private_data;
	u8 rev_id, scc;
	const char *scc_s;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
		   "%u slots %u ports %s mode IRQ via %s\n",
		   (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_probe_ent *probe_ent = NULL;
	struct mv_host_priv *hpriv;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	void __iomem *mmio_base;
	int pci_dev_busy = 0, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc) {
		return rc;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}

	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}
	memset(hpriv, 0, sizeof(*hpriv));

	probe_ent->sht = mv_port_info[board_idx].sht;
	probe_ent->host_flags = mv_port_info[board_idx].host_flags;
	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
	probe_ent->port_ops = mv_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->mmio_base = mmio_base;
	probe_ent->private_data = hpriv;

	/* initialize adapter */
	rc = mv_host_init(probe_ent);
	if (rc) {
		goto err_out_hpriv;
	}

	/* Enable interrupts */
	if (pci_enable_msi(pdev) == 0) {
		hpriv->hp_flags |= MV_HP_FLAG_MSI;
	} else {
		pci_intx(pdev, 1);
	}

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(probe_ent);

	if (ata_device_add(probe_ent) == 0) {
		rc = -ENODEV;		/* No devices discovered */
		goto err_out_dev_add;
	}

	kfree(probe_ent);
	return 0;

err_out_dev_add:
	if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
err_out_hpriv:
	kfree(hpriv);
err_out_iounmap:
	pci_iounmap(pdev, mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy) {
		pci_disable_device(pdev);
	}

	return rc;
}

static int __init mv_init(void)
{
	return pci_module_init(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(mv_init);
module_exit(mv_exit);