/*
 * Marvell 88SE94xx hardware specific
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
*/

#include "mv_sas.h"
#include "mv_94xx.h"
#include "mv_chips.h"

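/*
 * Read PHY mode 3 through the vendor-specific registers and record
 * whether this phy negotiated as SAS or SATA.
 */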
static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
{
	u32 reg;
	struct mvs_phy *phy = &mvi->phy[i];
	u32 phy_status;

	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
	reg = mvs_read_port_vsr_data(mvi, i);
	phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	switch (phy_status) {
	case 0x10:
		phy->phy_type |= PORT_TYPE_SAS;
		break;
	case 0x1d:
	default:
		phy->phy_type |= PORT_TYPE_SATA;
		break;
	}
}

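/*
 * Program the transmitter emphasis and amplitude values from the
 * HBA_Info_Page into the per-generation (Gen1/Gen2/Gen3) PHY setting
 * registers.  Only applies to B0 and later silicon.
 */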
void set_phy_tuning(struct mvs_info *mvi, int phy_id,
			struct phy_tuning phy_tuning)
{
	u32 tmp, setting_0 = 0, setting_1 = 0;
	u8 i;

	/* Remap information for B0 chip:
	 *
	 * R0Ch -> R118h[15:0] (Adapted DFE F3 - F5 coefficient)
	 * R0Dh -> R118h[31:16] (Generation 1 Setting 0)
	 * R0Eh -> R11Ch[15:0] (Generation 1 Setting 1)
	 * R0Fh -> R11Ch[31:16] (Generation 2 Setting 0)
	 * R10h -> R120h[15:0] (Generation 2 Setting 1)
	 * R11h -> R120h[31:16] (Generation 3 Setting 0)
	 * R12h -> R124h[15:0] (Generation 3 Setting 1)
	 * R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved))
	 */

	/* A0 has a different set of registers */
	if (mvi->pdev->revision == VANIR_A0_REV)
		return;

	for (i = 0; i < 3; i++) {
		/* loop 3 times, set Gen 1, Gen 2, Gen 3 */
		switch (i) {
		case 0:
			setting_0 = GENERATION_1_SETTING;
			setting_1 = GENERATION_1_2_SETTING;
			break;
		case 1:
			setting_0 = GENERATION_1_2_SETTING;
			setting_1 = GENERATION_2_3_SETTING;
			break;
		case 2:
			setting_0 = GENERATION_2_3_SETTING;
			setting_1 = GENERATION_3_4_SETTING;
			break;
		}

		/* Set:
		 *
		 * Transmitter Emphasis Enable
		 * Transmitter Emphasis Amplitude
		 * Transmitter Amplitude
		 */
		mvs_write_port_vsr_addr(mvi, phy_id, setting_0);
		tmp = mvs_read_port_vsr_data(mvi, phy_id);
		tmp &= ~(0xFBE << 16);
		tmp |= (((phy_tuning.trans_emp_en << 11) |
			(phy_tuning.trans_emp_amp << 7) |
			(phy_tuning.trans_amp << 1)) << 16);
		mvs_write_port_vsr_data(mvi, phy_id, tmp);

		/* Set Transmitter Amplitude Adjust */
		mvs_write_port_vsr_addr(mvi, phy_id, setting_1);
		tmp = mvs_read_port_vsr_data(mvi, phy_id);
		tmp &= ~(0xC000);
		tmp |= (phy_tuning.trans_amp_adj << 14);
		mvs_write_port_vsr_data(mvi, phy_id, tmp);
	}
}

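/*
 * Configure the FFE (feed-forward equalizer) resistor/capacitor selects
 * from the HBA_Info_Page and force the related DFE controls to fixed
 * values.  Only used on C0 and later revisions.
 */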
void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
			struct ffe_control ffe)
{
	u32 tmp;

	/* Don't run this if A0/B0 */
	if ((mvi->pdev->revision == VANIR_A0_REV)
		|| (mvi->pdev->revision == VANIR_B0_REV))
		return;

	/* FFE Resistor and Capacitor */
	/* R10Ch DFE Resolution Control/Squelch and FFE Setting
	 *
	 * FFE_FORCE		[7]
	 * FFE_RES_SEL		[6:4]
	 * FFE_CAP_SEL		[3:0]
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_FFE_CONTROL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp &= ~0xFF;

	/* Read from HBA_Info_Page */
	tmp |= ((0x1 << 7) |
		(ffe.ffe_rss_sel << 4) |
		(ffe.ffe_cap_sel << 0));

	mvs_write_port_vsr_data(mvi, phy_id, tmp);

	/* R064h PHY Mode Register 1
	 *
	 * DFE_DIS		18
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp &= ~0x40001;
	/* Hard coding */
	/* No defines in HBA_Info_Page */
	tmp |= (0 << 18);
	mvs_write_port_vsr_data(mvi, phy_id, tmp);

	/* R110h DFE F0-F1 Coefficient Control/DFE Update Control
	 *
	 * DFE_UPDATE_EN	[11:6]
	 * DFE_FX_FORCE		[5:0]
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_DFE_UPDATE_CRTL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp &= ~0xFFF;
	/* Hard coding */
	/* No defines in HBA_Info_Page */
	tmp |= ((0x3F << 6) | (0x0 << 0));
	mvs_write_port_vsr_data(mvi, phy_id, tmp);

	/* R1A0h Interface and Digital Reference Clock Control/Reserved_50h
	 *
	 * FFE_TRAIN_EN		3
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp &= ~0x8;
	/* Hard coding */
	/* No defines in HBA_Info_Page */
	tmp |= (0 << 3);
	mvs_write_port_vsr_data(mvi, phy_id, tmp);
}

/* Notice: this function must be called when phy is disabled */
void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
{
	union reg_phy_cfg phy_cfg, phy_cfg_tmp;
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
	phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, phy_id);
	phy_cfg.v = 0;
	phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy;
	phy_cfg.u.sas_support = 1;
	phy_cfg.u.sata_support = 1;
	phy_cfg.u.sata_host_mode = 1;

	switch (rate) {
	case 0x0:
		/* support 1.5 Gbps */
		phy_cfg.u.speed_support = 1;
		phy_cfg.u.snw_3_support = 0;
		phy_cfg.u.tx_lnk_parity = 1;
		phy_cfg.u.tx_spt_phs_lnk_rate = 0x30;
		break;
	case 0x1:
		/* support 1.5, 3.0 Gbps */
		phy_cfg.u.speed_support = 3;
		phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c;
		phy_cfg.u.tx_lgcl_lnk_rate = 0x08;
		break;
	case 0x2:
	default:
		/* support 1.5, 3.0, 6.0 Gbps */
		phy_cfg.u.speed_support = 7;
		phy_cfg.u.snw_3_support = 1;
		phy_cfg.u.tx_lnk_parity = 1;
		phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f;
		phy_cfg.u.tx_lgcl_lnk_rate = 0x09;
		break;
	}
	mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v);
}

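/*
 * Apply per-phy settings from the HBA_Info_Page parameters; entries left
 * at the erased value (all 0xFF) fall back to revision-dependent defaults
 * before being programmed through the helpers above.
 */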
static void __devinit
mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
{
	u32 temp;
	temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]);
	if (temp == 0xFFFFFFFFL) {
		mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6;
		mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A;
		mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3;
	}

	temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]);
	if (temp == 0xFFL) {
		switch (mvi->pdev->revision) {
		case VANIR_A0_REV:
		case VANIR_B0_REV:
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7;
			break;
		case VANIR_C0_REV:
		case VANIR_C1_REV:
		case VANIR_C2_REV:
		default:
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC;
			break;
		}
	}

	temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]);
	if (temp == 0xFFL)
		/* set default phy_rate = 6 Gbps */
		mvi->hba_info_param.phy_rate[phy_id] = 0x2;

	set_phy_tuning(mvi, phy_id,
		mvi->hba_info_param.phy_tuning[phy_id]);
	set_phy_ffe_tuning(mvi, phy_id,
		mvi->hba_info_param.ffe_ctl[phy_id]);
	set_phy_rate(mvi, phy_id,
		mvi->hba_info_param.phy_rate[phy_id]);
}

static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_PCS);
	tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
	mw32(MVS_PCS, tmp);
}

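/*
 * Reset one phy: a hard reset toggles PHY_RST_HARD in the phy control
 * register and polls until the hardware clears it, otherwise only the
 * PHY_RST bit in the VSR status register is set.
 */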
static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
{
	u32 tmp;

	tmp = mvs_read_port_irq_stat(mvi, phy_id);
	tmp &= ~PHYEV_RDY_CH;
	mvs_write_port_irq_stat(mvi, phy_id, tmp);
	if (hard) {
		tmp = mvs_read_phy_ctl(mvi, phy_id);
		tmp |= PHY_RST_HARD;
		mvs_write_phy_ctl(mvi, phy_id, tmp);
		do {
			tmp = mvs_read_phy_ctl(mvi, phy_id);
		} while (tmp & PHY_RST_HARD);
	} else {
		mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
		tmp = mvs_read_port_vsr_data(mvi, phy_id);
		tmp |= PHY_RST;
		mvs_write_port_vsr_data(mvi, phy_id, tmp);
	}
}

static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
{
	u32 tmp;
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
}

static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
{
	u32 tmp;
	u8 revision = 0;

	revision = mvi->pdev->revision;
	if (revision == VANIR_A0_REV) {
		mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
		mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
	}
	if (revision == VANIR_B0_REV) {
		mvs_write_port_vsr_addr(mvi, phy_id, CMD_APP_MEM_CTL);
		mvs_write_port_vsr_data(mvi, phy_id, 0x08001006);
		mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
		mvs_write_port_vsr_data(mvi, phy_id, 0x0000705f);
	}

	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp |= bit(0);
	mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff);
}

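/*
 * One-time controller bring-up: reset the chip, program revision-specific
 * PHY defaults, point the command, receive-FIS, TX and RX rings at their
 * DMA buffers, configure each phy, then enable interrupts and start the
 * delivery engines.
 */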
static int __devinit mvs_94xx_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;
	u8 revision;

	revision = mvi->pdev->revision;
	mvs_show_pcie_usage(mvi);
	if (mvi->flags & MVF_FLAG_SOC) {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_PHY_DSBL;
		mw32(MVS_PHY_CTL, tmp);
	}

	/* Init Chip */
	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(MVS_CTL) & 0xFFFF;
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(MVS_CTL, cctl | CCTL_RST);

	if (mvi->flags & MVF_FLAG_SOC) {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_COM_ON;
		tmp &= ~PCTL_PHY_DSBL;
		tmp |= PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
		tmp &= ~PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
	}

	/* disable Multiplexing, enable phy implemented */
	mw32(MVS_PORTS_IMP, 0xFF);

	if (revision == VANIR_A0_REV) {
		mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET);
		mw32(MVS_PA_VSR_PORT, 0x00018080);
	}
	mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2);
	if (revision == VANIR_A0_REV || revision == VANIR_B0_REV)
		/* set 6G/3G/1.5G, multiplexing, without SSC */
		mw32(MVS_PA_VSR_PORT, 0x0084d4fe);
	else
		/* set 6G/3G/1.5G, multiplexing, with and without SSC */
		mw32(MVS_PA_VSR_PORT, 0x0084fffe);

	if (revision == VANIR_B0_REV) {
		mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL);
		mw32(MVS_PA_VSR_PORT, 0x08001006);
		mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA);
		mw32(MVS_PA_VSR_PORT, 0x0000705f);
	}

	/* reset control */
	mw32(MVS_PCS, 0);		/* MVS_PCS */
	mw32(MVS_STP_REG_SET_0, 0);
	mw32(MVS_STP_REG_SET_1, 0);

	/* init phys */
	mvs_phy_hacks(mvi);

	/* set LED blink when IO */
	mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED);
	tmp = mr32(MVS_PA_VSR_PORT);
	tmp &= 0xFFFF00FF;
	tmp |= 0x00003300;
	mw32(MVS_PA_VSR_PORT, tmp);

	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(MVS_TX_LO, mvi->tx_dma);
	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
	mw32(MVS_RX_LO, mvi->rx_dma);
	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		mvs_94xx_phy_disable(mvi, i);
		/* set phy local SAS address */
		mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
						(mvi->phy[i].dev_sas_addr));

		mvs_94xx_enable_xmt(mvi, i);
		mvs_94xx_config_reg_from_hba(mvi, i);
		mvs_94xx_phy_enable(mvi, i);

		mvs_94xx_phy_reset(mvi, i, PHY_RST_HARD);
		msleep(500);
		mvs_94xx_detect_porttype(mvi, i);
	}

	if (mvi->flags & MVF_FLAG_SOC) {
		/* set select registers */
		writel(0x0E008000, regs + 0x000);
		writel(0x59000008, regs + 0x004);
		writel(0x20, regs + 0x008);
		writel(0x20, regs + 0x00c);
		writel(0x20, regs + 0x010);
		writel(0x20, regs + 0x014);
		writel(0x20, regs + 0x018);
		writel(0x20, regs + 0x01c);
	}
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
			PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
	}

	/* FIXME: update wide port bitmaps */

	/* little endian for open address and command table, etc. */
	/*
	 * it seems that ( from the spec ) turning on big-endian won't
	 * do us any good on big-endian machines, need further confirmation
	 */
	cctl = mr32(MVS_CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl |= CCTL_ENDIAN_DATA;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(MVS_CTL, cctl);

	/* reset CMD queue */
	tmp = mr32(MVS_PCS);
	tmp |= PCS_CMD_RST;
	mw32(MVS_PCS, tmp);
	/* Interrupt coalescing may cause a missed HW interrupt in some cases,
	 * and the max count is 0x1ff, while our max slot is 0x200;
	 * that would make the count 0.
	 */
	tmp = 0;
	mw32(MVS_INT_COAL, tmp);

	tmp = 0x10000 | interrupt_coalescing;
	mw32(MVS_INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(MVS_TX_CFG, 0);
	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
		PCS_CMD_EN | PCS_CMD_STOP_ERR);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
		CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR);
	tmp |= CINT_PHY_MASK;
	mw32(MVS_INT_MASK, tmp);

	/* Enable SRS interrupt */
	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);

	return 0;
}

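/*
 * Map the controller registers and derive the extended-register window
 * at offset 0x10200; the main register block starts at 0x20000 and the
 * second host of a dual-host chip is offset another 0x4000.
 */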
static int mvs_94xx_ioremap(struct mvs_info *mvi)
{
	if (!mvs_ioremap(mvi, 2, -1)) {
		mvi->regs_ex = mvi->regs + 0x10200;
		mvi->regs += 0x20000;
		if (mvi->id == 1)
			mvi->regs += 0x4000;
		return 0;
	}
	return -1;
}

static void mvs_94xx_iounmap(struct mvs_info *mvi)
{
	if (mvi->regs) {
		mvi->regs -= 0x20000;
		if (mvi->id == 1)
			mvi->regs -= 0x4000;
		mvs_iounmap(mvi->regs);
	}
}

static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);
	tmp |= (IRQ_SAS_A | IRQ_SAS_B);
	mw32(MVS_GBL_INT_STAT, tmp);
	writel(tmp, regs + 0x0C);
	writel(tmp, regs + 0x10);
	writel(tmp, regs + 0x14);
	writel(tmp, regs + 0x18);
	mw32(MVS_GBL_CTL, tmp);
}

static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);

	tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
	mw32(MVS_GBL_INT_STAT, tmp);
	writel(tmp, regs + 0x0C);
	writel(tmp, regs + 0x10);
	writel(tmp, regs + 0x14);
	writel(tmp, regs + 0x18);
	mw32(MVS_GBL_CTL, tmp);
}

static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
{
	void __iomem *regs = mvi->regs_ex;
	u32 stat = 0;
	if (!(mvi->flags & MVF_FLAG_SOC)) {
		stat = mr32(MVS_GBL_INT_STAT);

		if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
			return 0;
	}
	return stat;
}

static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
{
	void __iomem *regs = mvi->regs;

	if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
			((stat & IRQ_SAS_B) && mvi->id == 1)) {
		mw32_f(MVS_INT_STAT, CINT_DONE);
#ifndef MVS_USE_TASKLET
		spin_lock(&mvi->lock);
#endif
		mvs_int_full(mvi);
#ifndef MVS_USE_TASKLET
		spin_unlock(&mvi->lock);
#endif
	}
	return IRQ_HANDLED;
}

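/*
 * If the given slot still shows as active in the command-active bitmap,
 * request that it be cleared and wait for the hardware to acknowledge.
 */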
static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
	u32 tmp;
	tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
	if (tmp & 1 << (slot_idx % 32)) {
		mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
		mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
			1 << (slot_idx % 32));
		do {
			tmp = mvs_cr32(mvi,
				MVS_COMMAND_ACTIVE + (slot_idx >> 3));
		} while (tmp & 1 << (slot_idx % 32));
	}
}

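/*
 * Acknowledge SATA register-set (SRS) interrupts: either clear every
 * pending bit in both status registers, or just the bit belonging to one
 * register set.
 */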
void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (clear_all) {
		tmp = mr32(MVS_INT_STAT_SRS_0);
		if (tmp) {
			mv_dprintk("check SRS 0 %08X.\n", tmp);
			mw32(MVS_INT_STAT_SRS_0, tmp);
		}
		tmp = mr32(MVS_INT_STAT_SRS_1);
		if (tmp) {
			mv_dprintk("check SRS 1 %08X.\n", tmp);
			mw32(MVS_INT_STAT_SRS_1, tmp);
		}
	} else {
		if (reg_set > 31)
			tmp = mr32(MVS_INT_STAT_SRS_1);
		else
			tmp = mr32(MVS_INT_STAT_SRS_0);

		if (tmp & (1 << (reg_set % 32))) {
			mv_dprintk("register set 0x%x was stopped.\n", reg_set);
			if (reg_set > 31)
				mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32));
			else
				mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
		}
	}
}

static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
	u32 tfs)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;
	mvs_94xx_clear_srs_irq(mvi, 0, 1);

	tmp = mr32(MVS_INT_STAT);
	mw32(MVS_INT_STAT, tmp | CINT_CI_STOP);
	tmp = mr32(MVS_PCS) | 0xFF00;
	mw32(MVS_PCS, tmp);
}

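/*
 * Handle the "non-specific NCQ error" interrupt: for every register set
 * flagged in either error register, release the outstanding tasks of the
 * device mapped to that register set, then clear the error bits.
 */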
static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 err_0, err_1;
	u8 i;
	struct mvs_device *device;

	err_0 = mr32(MVS_NON_NCQ_ERR_0);
	err_1 = mr32(MVS_NON_NCQ_ERR_1);

	mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n",
		err_0, err_1);
	for (i = 0; i < 32; i++) {
		if (err_0 & bit(i)) {
			device = mvs_find_dev_by_reg_set(mvi, i);
			if (device)
				mvs_release_task(mvi, device->sas_device);
		}
		if (err_1 & bit(i)) {
			device = mvs_find_dev_by_reg_set(mvi, i+32);
			if (device)
				mvs_release_task(mvi, device->sas_device);
		}
	}

	mw32(MVS_NON_NCQ_ERR_0, err_0);
	mw32(MVS_NON_NCQ_ERR_1, err_1);
}

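/*
 * Return a SATA register set to the free pool and clear any interrupt
 * status still pending for it.
 */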
static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;
	u8 reg_set = *tfs;

	if (*tfs == MVS_ID_NOT_MAPPED)
		return;

	mvi->sata_reg_set &= ~bit(reg_set);
	if (reg_set < 32) {
		w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
		tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
		if (tmp)
			mw32(MVS_INT_STAT_SRS_0, tmp);
	} else {
		w_reg_set_enable(reg_set, mvi->sata_reg_set);
		tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
		if (tmp)
			mw32(MVS_INT_STAT_SRS_1, tmp);
	}

	*tfs = MVS_ID_NOT_MAPPED;

	return;
}

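/*
 * Allocate the lowest free SATA register set for a device; register sets
 * 0-31 and 32-63 are enabled through separate 32-bit windows.
 */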
static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	int i;
	void __iomem *regs = mvi->regs;

	if (*tfs != MVS_ID_NOT_MAPPED)
		return 0;

	i = mv_ffc64(mvi->sata_reg_set);
	if (i >= 32) {
		mvi->sata_reg_set |= bit(i);
		w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
		*tfs = i;
		return 0;
	} else if (i >= 0) {
		mvi->sata_reg_set |= bit(i);
		w_reg_set_enable(i, (u32)mvi->sata_reg_set);
		*tfs = i;
		return 0;
	}
	return MVS_ID_NOT_MAPPED;
}

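/*
 * Fill the physical region descriptor (PRD) table from a scatterlist:
 * one 64-bit DMA address plus length entry per segment.
 */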
static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
{
	int i;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd = prd;
	for_each_sg(scatter, sg, nr, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}
}

static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
{
	u32 phy_st;
	phy_st = mvs_read_phy_ctl(mvi, i);
	if (phy_st & PHY_READY_MASK)	/* phy ready */
		return 1;
	return 0;
}

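/*
 * Read the 28-byte identify address frames (local and attached) out of
 * the per-port configuration registers, one dword at a time.
 */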
static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
					struct sas_identify_frame *id)
{
	int i;
	u32 id_frame[7];

	for (i = 0; i < 7; i++) {
		mvs_write_port_cfg_addr(mvi, port_id,
					CONFIG_ID_FRAME0 + i * 4);
		id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
	}
	memcpy(id, id_frame, 28);
}

static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
					struct sas_identify_frame *id)
{
	int i;
	u32 id_frame[7];

	/* mvs_hexdump(28, (u8 *)id_frame, 0); */
	for (i = 0; i < 7; i++) {
		mvs_write_port_cfg_addr(mvi, port_id,
					CONFIG_ATT_ID_FRAME0 + i * 4);
		id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
		mv_dprintk("94xx phy %d atta frame %d %x.\n",
			port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
	}
	/* mvs_hexdump(28, (u8 *)id_frame, 0); */
	memcpy(id, id_frame, 28);
}

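/*
 * Condense an identify frame into the bitmask format the driver keeps in
 * phy->dev_info / phy->att_dev_info (device type, initiator/target roles
 * and phy id).
 */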
static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
{
	u32 att_dev_info = 0;

	att_dev_info |= id->dev_type;
	if (id->stp_iport)
		att_dev_info |= PORT_DEV_STP_INIT;
	if (id->smp_iport)
		att_dev_info |= PORT_DEV_SMP_INIT;
	if (id->ssp_iport)
		att_dev_info |= PORT_DEV_SSP_INIT;
	if (id->stp_tport)
		att_dev_info |= PORT_DEV_STP_TRGT;
	if (id->smp_tport)
		att_dev_info |= PORT_DEV_SMP_TRGT;
	if (id->ssp_tport)
		att_dev_info |= PORT_DEV_SSP_TRGT;

	att_dev_info |= (u32)id->phy_id<<24;
	return att_dev_info;
}

static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
{
	return mvs_94xx_make_dev_info(id);
}

static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
				struct sas_identify_frame *id)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
	sas_phy->linkrate =
		(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
			PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
	sas_phy->linkrate += 0x8;
	mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
	mvs_94xx_get_dev_identify_frame(mvi, i, id);
	phy->dev_info = mvs_94xx_make_dev_info(id);

	if (phy->phy_type & PORT_TYPE_SAS) {
		mvs_94xx_get_att_identify_frame(mvi, i, id);
		phy->att_dev_info = mvs_94xx_make_att_info(id);
		phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
	} else {
		phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
	}

}

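/*
 * Apply a new maximum link rate to the phy control register, then hard
 * reset the phy so the change takes effect.
 */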
void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
			struct sas_phy_linkrates *rates)
{
	u32 lrmax = 0;
	u32 tmp;

	tmp = mvs_read_phy_ctl(mvi, phy_id);
	lrmax = (rates->maximum_linkrate - SAS_LINK_RATE_1_5_GBPS) << 12;

	if (lrmax) {
		tmp &= ~(0x3 << 12);
		tmp |= lrmax;
	}
	mvs_write_phy_ctl(mvi, phy_id, tmp);
	mvs_94xx_phy_reset(mvi, phy_id, PHY_RST_HARD);
}

static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
{
	u32 tmp;
	void __iomem *regs = mvi->regs;
	tmp = mr32(MVS_STP_REG_SET_0);
	mw32(MVS_STP_REG_SET_0, 0);
	mw32(MVS_STP_REG_SET_0, tmp);
	tmp = mr32(MVS_STP_REG_SET_1);
	mw32(MVS_STP_REG_SET_1, 0);
	mw32(MVS_STP_REG_SET_1, tmp);
}


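/*
 * SPI flash access helpers.  The SPI registers live in the window below
 * regs_ex (hence the -0x10200 adjustment); commands are built into a
 * control word, issued with the start bit, and polled for completion.
 */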
u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	return mr32(SPI_RD_DATA_REG_94XX);
}

void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	mw32(SPI_RD_DATA_REG_94XX, data);
}


int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
			u32      *dwCmd,
			u8       cmd,
			u8       read,
			u8       length,
			u32      addr
			)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	u32  dwTmp;

	dwTmp = ((u32)cmd << 8) | ((u32)length << 4);
	if (read)
		dwTmp |= SPI_CTRL_READ_94XX;

	if (addr != MV_MAX_U32) {
		mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL));
		dwTmp |= SPI_ADDR_VLD_94XX;
	}

	*dwCmd = dwTmp;
	return 0;
}


int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);

	return 0;
}

int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	u32   i, dwTmp;

	for (i = 0; i < timeout; i++) {
		dwTmp = mr32(SPI_CTRL_REG_94XX);
		if (!(dwTmp & SPI_CTRL_SpiStart_94XX))
			return 0;
		msleep(10);
	}

	return -1;
}

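/*
 * Fill the unused tail of a PRD table on A0/B0 parts: the remaining
 * entries are pointed at one of the two bulk bounce buffers (chosen by
 * phy_mask), presumably so every descriptor the engine may fetch stays
 * valid.  Later revisions do not need this and return early.
 */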
void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
				int buf_len, int from, void *prd)
{
	int i;
	struct mvs_prd *buf_prd = prd;
	dma_addr_t buf_dma;
	buf_prd += from;

	if ((mvi->pdev->revision == VANIR_A0_REV) ||
		(mvi->pdev->revision == VANIR_B0_REV))
		buf_dma = (phy_mask <= 0x08) ?
				mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1;
	else
		return;

	for (i = 0; i < MAX_SG_ENTRY - from; i++) {
		buf_prd->addr = cpu_to_le64(buf_dma);
		buf_prd->im_len.len = cpu_to_le32(buf_len);
		++buf_prd;
	}
}

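/*
 * Runtime interrupt-coalescing control: a time of 0 disables coalescing,
 * otherwise the count is capped at the 0x1ff hardware maximum and the
 * timeout register is loaded with the requested value.
 */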
static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time)
{
	void __iomem *regs = mvi->regs;
	u32 tmp = 0;
	/* Interrupt coalescing may cause a missed HW interrupt in some cases,
	 * and the max count is 0x1ff, while our max slot is 0x200;
	 * that would make the count 0.
	 */
	if (time == 0) {
		mw32(MVS_INT_COAL, 0);
		mw32(MVS_INT_COAL_TMOUT, 0x10000);
	} else {
		if (MVS_CHIP_SLOT_SZ > 0x1ff)
			mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
		else
			mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);

		tmp = 0x10000 | time;
		mw32(MVS_INT_COAL_TMOUT, tmp);
	}

}

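/*
 * Dispatch table binding the generic mvsas core to the 94xx-specific
 * implementations above; NULL slots are operations this chip does not
 * provide.
 */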
const struct mvs_dispatch mvs_94xx_dispatch = {
	"mv94xx",
	mvs_94xx_init,
	NULL,
	mvs_94xx_ioremap,
	mvs_94xx_iounmap,
	mvs_94xx_isr,
	mvs_94xx_isr_status,
	mvs_94xx_interrupt_enable,
	mvs_94xx_interrupt_disable,
	mvs_read_phy_ctl,
	mvs_write_phy_ctl,
	mvs_read_port_cfg_data,
	mvs_write_port_cfg_data,
	mvs_write_port_cfg_addr,
	mvs_read_port_vsr_data,
	mvs_write_port_vsr_data,
	mvs_write_port_vsr_addr,
	mvs_read_port_irq_stat,
	mvs_write_port_irq_stat,
	mvs_read_port_irq_mask,
	mvs_write_port_irq_mask,
	mvs_94xx_command_active,
	mvs_94xx_clear_srs_irq,
	mvs_94xx_issue_stop,
	mvs_start_delivery,
	mvs_rx_update,
	mvs_int_full,
	mvs_94xx_assign_reg_set,
	mvs_94xx_free_reg_set,
	mvs_get_prd_size,
	mvs_get_prd_count,
	mvs_94xx_make_prd,
	mvs_94xx_detect_porttype,
	mvs_94xx_oob_done,
	mvs_94xx_fix_phy_info,
	NULL,
	mvs_94xx_phy_set_link_rate,
	mvs_hw_max_link_rate,
	mvs_94xx_phy_disable,
	mvs_94xx_phy_enable,
	mvs_94xx_phy_reset,
	NULL,
	mvs_94xx_clear_active_cmds,
	mvs_94xx_spi_read_data,
	mvs_94xx_spi_write_data,
	mvs_94xx_spi_buildcmd,
	mvs_94xx_spi_issuecmd,
	mvs_94xx_spi_waitdataready,
	mvs_94xx_fix_dma,
	mvs_94xx_tune_interrupt,
	mvs_94xx_non_spec_ncq_error,
};