/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* RS600 / Radeon X1250/X1270 integrated GPU
 *
 * This file gathers the functions specific to RS600, which is the IGP of
 * the X1250/X1270 family for Intel CPUs (while RS690/RS740 is the
 * X1250/X1270 for AMD CPUs). The display engine is the Avivo one, the
 * BIOS is an AtomBIOS, and the 3D blocks are those of the R4XX family.
 * The GART is different from the RS400 one and is very close to the one
 * of the R600 family (R600 likely being an evolution of the RS600 GART
 * block).
 */
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs600d.h"

#include "rs600_reg_safe.h"

void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);

void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

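/*
 * Program a page flip on the given CRTC: take the GRPH update lock so the
 * new scanout addresses latch atomically, write the new base address,
 * wait for SURFACE_UPDATE_PENDING to assert, then release the lock so the
 * flip completes during the next vblank. Returns the current
 * update-pending status so the caller can tell whether the flip has
 * already latched.
 */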
u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}

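/*
 * Apply the miscellaneous power-management settings of the requested
 * power state: drive the core voltage (via GPIO or the ATOM VDDC table),
 * program the reduced-speed SCLK dividers, dynamic voltage drop, HDP
 * clock gating and dynamic backbias, and, on discrete PCIe parts, the
 * PCIe lane count.
 */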
void rs600_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	} else if (voltage->type == VOLTAGE_VDDC)
		radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);

	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
		}
	} else {
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
	}
	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);

	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
		} else
			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);

	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		hdp_dyn_cntl &= ~HDP_FORCEON;
	else
		hdp_dyn_cntl |= HDP_FORCEON;
	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
#if 0
	/* mc_host_dyn seems to cause hangs from time to time */
	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
	else
		mc_host_dyn_cntl |= MC_HOST_FORCEON;
	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
#endif
	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
	else
		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
	}
}

void rs600_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

void rs600_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
			connected = true;
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

void rs600_hpd_set_polarity(struct radeon_device *rdev,
			    enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = rs600_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		else
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		else
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

void rs600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
			rdev->irq.hpd[1] = true;
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	if (rdev->irq.installed)
		rs600_irq_set(rdev);
}

void rs600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
			rdev->irq.hpd[1] = false;
			break;
		default:
			break;
		}
	}
}

void rs600_bm_disable(struct radeon_device *rdev)
{
	u16 tmp;

	/* disable bus mastering */
	pci_read_config_word(rdev->pdev, 0x4, &tmp);
	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
	mdelay(1);
}

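/*
 * Full ASIC reset: save the MC state, stop the MC clients and the CP,
 * then soft-reset the GA/VAP, CP and MC blocks in turn (logging
 * RBBM_STATUS after each step), and finally restore the PCI config
 * space and MC state. Returns -1 if the GPU is still busy afterwards.
 */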
int rs600_asic_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	rs600_bm_disable(rdev);
	/* reset GA+VAP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset CP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset MC */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		rdev->gpu_lockup = true;
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeeded\n");
	rv515_mc_resume(rdev, &save);
	return ret;
}

/*
 * GART.
 */
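/*
 * Flush the GART TLBs by pulsing the INVALIDATE_ALL_L1_TLBS and
 * INVALIDATE_L2_CACHE bits in MC_PT0_CNTL: clear, set, then clear again,
 * with a final read-back so the writes are posted before returning.
 */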
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}

int rs600_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "RS600 GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

static int rs600_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Enable bus master */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* FIXME: setup default page */
	WREG32_MC(R_000100_MC_PT0_CNTL,
		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));

	for (i = 0; i < 19; i++) {
		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
	}
	/* enable first context */
	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
		  S_000102_ENABLE_PAGE_TABLE(1) |
		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));

	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);

	/* setup the page table */
	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		  rdev->gart.table_addr);
	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

	/* System context maps to VRAM space */
	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);

	/* enable page tables */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
	rs600_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void rs600_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	/* FIXME: disable out of gart access */
	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (r == 0) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void rs600_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs600_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

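/*
 * Write one 64-bit GART page-table entry: the page address is masked to a
 * 4KB boundary and the R600-style VALID/SYSTEM/SNOOPED/READABLE/WRITEABLE
 * flag bits are ORed in before the entry is written to the table in VRAM.
 */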
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = addr & 0xFFFFFFFFFFFFF000ULL;
	addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
	addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
	writeq(addr, ((void __iomem *)ptr) + (i * 8));
	return 0;
}

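/*
 * Build and program the interrupt enable masks from the current
 * rdev->irq state: software interrupts, GUI idle, per-CRTC vblank and
 * page-flip interrupts, and the two hot-plug detect lines.
 */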
int rs600_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;
	uint32_t mode_int = 0;
	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (rdev->irq.sw_int) {
		tmp |= S_000040_SW_INT_EN(1);
	}
	if (rdev->irq.gui_idle) {
		tmp |= S_000040_GUI_IDLE(1);
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.hpd[0]) {
		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	}
	if (rdev->irq.hpd[1]) {
		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
	}
	WREG32(R_000040_GEN_INT_CNTL, tmp);
	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
	return 0;
}

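/*
 * Read GEN_INT_STATUS, latch the display interrupt status into
 * rdev->irq.stat_regs.r500.disp_int, acknowledge the vblank and HPD
 * sources that fired, and return the still-relevant masked status bits.
 */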
static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
	uint32_t irq_mask = S_000044_SW_INT(1);
	u32 tmp;

	/* the interrupt works, but the status bit is permanently asserted */
	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
		if (!rdev->irq.gui_idle_acked)
			irq_mask |= S_000044_GUI_IDLE_STAT(1);
	}

	if (G_000044_DISPLAY_INT_STAT(irqs)) {
		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006534_D1MODE_VBLANK_STATUS,
			       S_006534_D1MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
			       S_006D34_D2MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	} else {
		rdev->irq.stat_regs.r500.disp_int = 0;
	}

	if (irqs) {
		WREG32(R_000044_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

void rs600_irq_disable(struct radeon_device *rdev)
{
	WREG32(R_000040_GEN_INT_CNTL, 0);
	WREG32(R_006540_DxMODE_INT_MASK, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	rs600_irq_ack(rdev);
}

int rs600_irq_process(struct radeon_device *rdev)
{
	u32 status, msi_rearm;
	bool queue_hotplug = false;

	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;

	status = rs600_irq_ack(rdev);
	if (!status && !rdev->irq.stat_regs.r500.disp_int) {
		return IRQ_NONE;
	}
	while (status || rdev->irq.stat_regs.r500.disp_int) {
		/* SW interrupt */
		if (G_000044_SW_INT(status)) {
			radeon_fence_process(rdev);
		}
		/* GUI idle */
		if (G_000040_GUI_IDLE(status)) {
			rdev->irq.gui_idle_acked = true;
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
		}
		/* Vertical blank interrupts */
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[0])
				radeon_crtc_handle_flip(rdev, 0);
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[1])
				radeon_crtc_handle_flip(rdev, 1);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = rs600_irq_ack(rdev);
	}
	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS600:
		case CHIP_RS690:
		case CHIP_RS740:
			msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
			WREG32(RADEON_BUS_CNTL, msi_rearm);
			WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
			break;
		default:
			msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
			WREG32(RADEON_MSI_REARM_EN, msi_rearm);
			WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}

u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
	else
		return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
}

int rs600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
			return 0;
		udelay(1);
	}
	return -1;
}

void rs600_gpu_init(struct radeon_device *rdev)
{
	r420_pipes_init(rdev);
	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}

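/*
 * Probe the memory configuration: read the aperture and VRAM size,
 * derive the MC framebuffer base from MC_FB_LOCATION, and let the
 * common code place the VRAM and GTT ranges in the GPU address space.
 */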
void rs600_mc_init(struct radeon_device *rdev)
{
	u64 base;

	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	base = RREG32_MC(R_000004_MC_FB_LOCATION);
	base = G_000004_MC_FB_START(base) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

void rs600_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
	/* FIXME: implement full support */

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;

	rs690_line_buffer_adjust(rdev, mode0, mode1);

	if (rdev->disp_priority == 2) {
		d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
		d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
		d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
	}
}

uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
		S_000070_MC_IND_CITF_ARB0(1));
	return RREG32(R_000074_MC_IND_DATA);
}

void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
		S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
	WREG32(R_000074_MC_IND_DATA, v);
}

void rs600_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}

void rs600_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
}

static void rs600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");

	/* FIXME: What does AGP mean for such a chipset? */
	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
	WREG32_MC(R_000006_AGP_BASE, 0);
	WREG32_MC(R_000007_AGP_BASE_2, 0);
	/* Program MC */
	WREG32_MC(R_000004_MC_FB_LOCATION,
		  S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
		  S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
	       S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

	rv515_mc_resume(rdev, &save);
}

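/*
 * Bring the ASIC up: program the MC, restart the clocks, enable the
 * GART, then the writeback buffer, interrupts, CP ring, IB pool and
 * audio, in that order. Any failure along the way is returned to the
 * caller.
 */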
static int rs600_startup(struct radeon_device *rdev)
{
	int r;

	rs600_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs600_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs600_gart_enable(rdev);
	if (r)
		return r;

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = r100_ib_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing audio\n");
		return r;
	}

	return 0;
}

int rs600_resume(struct radeon_device *rdev)
{
	/* Make sure GART is not working */
	rs600_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting, otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return rs600_startup(rdev);
}

int rs600_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	rs600_gart_disable(rdev);
	return 0;
}

void rs600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	rs600_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int rs600_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	rv515_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some registers to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting, otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs600_mc_init(rdev);
	rs600_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs600_gart_init(rdev);
	if (r)
		return r;
	rs600_set_safe_registers(rdev);
	rdev->accel_working = true;
	r = rs600_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		rs600_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}