/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* RS600 / Radeon X1250/X1270 integrated GPU
 *
 * This file gathers the functions specific to RS600, the IGP of the
 * X1250/X1270 family that pairs with Intel CPUs (RS690/RS740 is the
 * X1250/X1270 variant for AMD CPUs). The display engine is the AVIVO
 * one, the BIOS is an ATOM BIOS, and the 3D blocks are those of the
 * R4XX family. The GART is different from the RS400 one and is very
 * close to that of the R600 family (R600 likely being an evolution of
 * the RS600 GART block).
 */
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs600d.h"

#include "rs600_reg_safe.h"

void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);

void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

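/*
 * Program a new scanout base for the given CRTC. The GRPH update lock is
 * held while both surface address registers are written, then released so
 * the double-buffered update can latch during vblank. Returns the current
 * SURFACE_UPDATE_PENDING status.
 */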
u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);

	/* Lock the graphics update lock */
	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}

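/*
 * Apply the miscellaneous settings of the requested power state: GPIO or
 * VDDC voltage selection, reduced-speed SCLK lengths, dynamic voltage drop,
 * HDP clock gating, backbias and, on non-IGP PCIE parts, the PCIE lane count.
 */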
void rs600_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	} else if (voltage->type == VOLTAGE_VDDC)
		radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);

	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
		}
	} else {
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
	}
	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);

	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
		} else
			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);

	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		hdp_dyn_cntl &= ~HDP_FORCEON;
	else
		hdp_dyn_cntl |= HDP_FORCEON;
	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
#if 0
	/* mc_host_dyn seems to cause hangs from time to time */
	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
	else
		mc_host_dyn_cntl |= MC_HOST_FORCEON;
	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
#endif
	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
	else
		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
	}
}

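/*
 * rs600_pm_prepare()/rs600_pm_finish() bracket a power state change by
 * disabling and re-enabling display read requests on all active CRTCs.
 */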
void rs600_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

void rs600_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
			connected = true;
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

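/*
 * Program the HPD interrupt polarity based on the currently sensed state,
 * so the next interrupt fires on a connect/disconnect change.
 */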
void rs600_hpd_set_polarity(struct radeon_device *rdev,
			    enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = rs600_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		else
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		else
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

void rs600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
			rdev->irq.hpd[1] = true;
			break;
		default:
			break;
		}
	}
	if (rdev->irq.installed)
		rs600_irq_set(rdev);
}

void rs600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
			rdev->irq.hpd[1] = false;
			break;
		default:
			break;
		}
	}
}

void rs600_bm_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* disable bus mastering */
	pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
	mdelay(1);
}

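/*
 * Soft reset the GPU: stop the MC clients and the CP, disable bus mastering,
 * then pulse the VAP/GA, CP and MC soft reset bits, logging RBBM_STATUS after
 * each step. Returns 0 if the GPU reports idle afterwards, -1 otherwise (and
 * flags a GPU lockup).
 */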
int rs600_asic_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	rs600_bm_disable(rdev);
	/* reset GA+VAP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset CP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset MC */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		rdev->gpu_lockup = true;
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeed\n");
	rv515_mc_resume(rdev, &save);
	return ret;
}

/*
 * GART.
 */
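/* Invalidate the L1 TLBs and the L2 cache by toggling the MC_PT0_CNTL bits. */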
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}

int rs600_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "RS600 GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

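/*
 * Enable the GART: pin the page table in VRAM, enable bus mastering, program
 * the per-client and context 0 page table registers (a flat page table
 * covering the GTT range), point the system aperture at VRAM, then turn page
 * tables on and flush the TLB.
 */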
static int rs600_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Enable bus master */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* FIXME: setup default page */
	WREG32_MC(R_000100_MC_PT0_CNTL,
		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));

	for (i = 0; i < 19; i++) {
		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
	}
	/* enable first context */
	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
		  S_000102_ENABLE_PAGE_TABLE(1) |
		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));

	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);

	/* setup the page table */
	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		  rdev->gart.table_addr);
	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

	/* System context maps to VRAM space */
	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);

	/* enable page tables */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
	rs600_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rs600_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	/* FIXME: disable out of gart access */
	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (r == 0) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void rs600_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs600_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

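/*
 * Write one GART entry. RS600 uses R600-style PTEs: the 4KB-aligned system
 * address ORed with the flag bits defined above, stored as a 64-bit entry.
 */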
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

	if (i < 0 || i > rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = addr & 0xFFFFFFFFFFFFF000ULL;
	addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
	addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
	writeq(addr, ptr + (i * 8));
	return 0;
}

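/*
 * Enable the interrupt sources currently requested in rdev->irq: software
 * interrupts, GUI idle, per-CRTC vblank/pageflip and the two HPD lines.
 */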
int rs600_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;
	uint32_t mode_int = 0;
	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (rdev->irq.sw_int) {
		tmp |= S_000040_SW_INT_EN(1);
	}
	if (rdev->irq.gui_idle) {
		tmp |= S_000040_GUI_IDLE(1);
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.hpd[0]) {
		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	}
	if (rdev->irq.hpd[1]) {
		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
	}
	WREG32(R_000040_GEN_INT_CNTL, tmp);
	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
	return 0;
}

static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
	uint32_t irq_mask = S_000044_SW_INT(1);
	u32 tmp;

	/* the interrupt works, but the status bit is permanently asserted */
	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
		if (!rdev->irq.gui_idle_acked)
			irq_mask |= S_000044_GUI_IDLE_STAT(1);
	}

	if (G_000044_DISPLAY_INT_STAT(irqs)) {
		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006534_D1MODE_VBLANK_STATUS,
			       S_006534_D1MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
			       S_006D34_D2MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	} else {
		rdev->irq.stat_regs.r500.disp_int = 0;
	}

	if (irqs) {
		WREG32(R_000044_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

void rs600_irq_disable(struct radeon_device *rdev)
{
	WREG32(R_000040_GEN_INT_CNTL, 0);
	WREG32(R_006540_DxMODE_INT_MASK, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	rs600_irq_ack(rdev);
}

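/*
 * Interrupt handler: repeatedly acknowledge and service pending sources
 * (fences, GUI idle, vblank/pageflip, hotplug) until none remain, then
 * schedule the hotplug work if needed and re-arm MSI on chips that need it.
 */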
int rs600_irq_process(struct radeon_device *rdev)
{
	u32 status, msi_rearm;
	bool queue_hotplug = false;

	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;

	status = rs600_irq_ack(rdev);
	if (!status && !rdev->irq.stat_regs.r500.disp_int) {
		return IRQ_NONE;
	}
	while (status || rdev->irq.stat_regs.r500.disp_int) {
		/* SW interrupt */
		if (G_000044_SW_INT(status)) {
			radeon_fence_process(rdev);
		}
		/* GUI idle */
		if (G_000040_GUI_IDLE(status)) {
			rdev->irq.gui_idle_acked = true;
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
		}
		/* Vertical blank interrupts */
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[0])
				radeon_crtc_handle_flip(rdev, 0);
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[1])
				radeon_crtc_handle_flip(rdev, 1);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = rs600_irq_ack(rdev);
	}
	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS600:
		case CHIP_RS690:
		case CHIP_RS740:
			msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
			WREG32(RADEON_BUS_CNTL, msi_rearm);
			WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
			break;
		default:
			msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
			WREG32(RADEON_MSI_REARM_EN, msi_rearm);
			WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}

u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
	else
		return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
}

int rs600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
			return 0;
		udelay(1);
	}
	return -1;
}

void rs600_gpu_init(struct radeon_device *rdev)
{
	r420_pipes_init(rdev);
	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}

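/*
 * Fill in the memory controller layout: aperture base/size, VRAM size from
 * CONFIG_MEMSIZE, the FB base from MC_FB_LOCATION, and the GTT placement
 * derived from it.
 */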
void rs600_mc_init(struct radeon_device *rdev)
{
	u64 base;

	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	base = RREG32_MC(R_000004_MC_FB_LOCATION);
	base = G_000004_MC_FB_START(base) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

void rs600_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
	/* FIXME: implement full support */

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;

	rs690_line_buffer_adjust(rdev, mode0, mode1);

	if (rdev->disp_priority == 2) {
		d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
		d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
		d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
	}
}

uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
		S_000070_MC_IND_CITF_ARB0(1));
	return RREG32(R_000074_MC_IND_DATA);
}

void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
		S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
	WREG32(R_000074_MC_IND_DATA, v);
}

void rs600_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}

void rs600_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
}

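/*
 * Program the MC FB and HDP locations with the MC clients stopped; the AGP
 * registers are written with placeholder values (there is no AGP on this IGP).
 */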
static void rs600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");

	/* FIXME: What does AGP mean for such a chipset? */
	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
	WREG32_MC(R_000006_AGP_BASE, 0);
	WREG32_MC(R_000007_AGP_BASE_2, 0);
	/* Program MC */
	WREG32_MC(R_000004_MC_FB_LOCATION,
			S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
			S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

	rv515_mc_resume(rdev, &save);
}

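/*
 * Bring the ASIC up: program the MC, restart clocks, initialize the GPU
 * configuration and GART, allocate the writeback buffer, enable IRQs and
 * start the CP ring, IB pool and audio. Used by both init and resume.
 */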
static int rs600_startup(struct radeon_device *rdev)
{
	int r;

	rs600_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs600_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs600_gart_enable(rdev);
	if (r)
		return r;

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = r100_ib_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing audio\n");
		return r;
	}

	return 0;
}

int rs600_resume(struct radeon_device *rdev)
{
	/* Make sure GART is not working */
	rs600_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return rs600_startup(rdev);
}

int rs600_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	rs600_gart_disable(rdev);
	return 0;
}

void rs600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	rs600_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int rs600_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	rv515_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some registers to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs600_mc_init(rdev);
	rs600_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs600_gart_init(rdev);
	if (r)
		return r;
	rs600_set_safe_registers(rdev);
	rdev->accel_working = true;
	r = rs600_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		rs600_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}