/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* RS600 / Radeon X1250/X1270 integrated GPU
 *
 * This file gathers the functions specific to the RS600, the IGP of the
 * X1250/X1270 family for Intel CPUs (the RS690/RS740 is the X1250/X1270
 * variant for AMD CPUs). The display engine is the Avivo one, the BIOS is
 * an AtomBIOS, and the 3D blocks are those of the R4xx family. The GART is
 * different from the RS400 one and very close to that of the R600 family
 * (the R600 GART likely being an evolution of the RS600 GART block).
 */
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs600d.h"

#include "rs600_reg_safe.h"

void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);

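/*
 * Wait for a fresh vblank period on the given crtc: first wait until the
 * scanout leaves any vblank in progress, then wait for vblank to be
 * asserted again, so the caller has a full vblank interval ahead of it.
 * Both waits are bounded by rdev->usec_timeout, and nothing is done if the
 * crtc is disabled.
 */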
void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
	int i;

	if (RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset) & AVIVO_CRTC_EN) {
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (!(RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK))
				break;
			udelay(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK)
				break;
			udelay(1);
		}
	}
}

void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

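/*
 * Program a page flip on the given crtc: take the graphics update lock so
 * the new primary/secondary surface addresses are committed atomically,
 * busy-wait (bounded by rdev->usec_timeout) for the hardware to latch them,
 * then drop the lock so the buffered flip can complete during vblank.
 * Returns the current SURFACE_UPDATE_PENDING status.
 */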
u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}

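/*
 * Apply the miscellaneous settings of the requested power state: GPIO or
 * VDDC voltage selection, reduced-speed SCLK stretching, dynamic voltage
 * drop, HDP and backbias clock gating and, on non-IGP PCIE parts, the PCIE
 * lane count. The MC_HOST dynamic gating below stays compiled out because
 * it reportedly caused intermittent hangs.
 */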
void rs600_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	} else if (voltage->type == VOLTAGE_VDDC)
		radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);

	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
		}
	} else {
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
	}
	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);

	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
		} else
			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);

	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		hdp_dyn_cntl &= ~HDP_FORCEON;
	else
		hdp_dyn_cntl |= HDP_FORCEON;
	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
#if 0
	/* mc_host_dyn seems to cause hangs from time to time */
	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
	else
		mc_host_dyn_cntl |= MC_HOST_FORCEON;
	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
#endif
	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
	else
		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->pm.set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
	}
}

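/*
 * Around a power state change, display memory requests are gated off and
 * back on: rs600_pm_prepare() sets DISP_READ_REQUEST_DISABLE on every
 * enabled crtc and rs600_pm_finish() clears it again, presumably so that
 * reclocking cannot race with display fetches from memory.
 */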
void rs600_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

void rs600_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
			connected = true;
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

void rs600_hpd_set_polarity(struct radeon_device *rdev,
			    enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = rs600_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		else
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		else
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

void rs600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
			rdev->irq.hpd[1] = true;
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	if (rdev->irq.installed)
		rs600_irq_set(rdev);
}

void rs600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
			rdev->irq.hpd[1] = false;
			break;
		default:
			break;
		}
	}
}

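/*
 * Attempt a full ASIC reset. If the GUI block is already idle there is
 * nothing to do; otherwise stop the MC clients, halt the CP ring, disable
 * bus mastering, and soft-reset the GA/VAP, CP and MC blocks in turn,
 * logging RBBM_STATUS after each step. Returns 0 on success, or -1 (and
 * marks the GPU locked up) if GA/VAP are still busy afterwards.
 */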
int rs600_asic_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	pci_clear_master(rdev->pdev);
	mdelay(1);
	/* reset GA+VAP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset CP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset MC */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		rdev->gpu_lockup = true;
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeeded\n");
	rv515_mc_resume(rdev, &save);
	return ret;
}

/*
 * GART.
 */
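/*
 * Flush the GART TLB by pulsing the INVALIDATE_ALL_L1_TLBS and
 * INVALIDATE_L2_CACHE bits in MC_PT0_CNTL: clear, set, then clear again,
 * with a final read back, presumably to make sure the invalidation has
 * posted before returning.
 */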
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}

int rs600_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "RS600 GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

static int rs600_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Enable bus master */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* FIXME: setup default page */
	WREG32_MC(R_000100_MC_PT0_CNTL,
		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));

	for (i = 0; i < 19; i++) {
		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
	}
	/* enable first context */
	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
		  S_000102_ENABLE_PAGE_TABLE(1) |
		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));

	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);

	/* setup the page table */
	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		  rdev->gart.table_addr);
	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

	/* System context maps to VRAM space */
	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);

	/* enable page tables */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
	rs600_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rs600_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* FIXME: disable out of gart access */
	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
	radeon_gart_table_vram_unpin(rdev);
}

void rs600_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs600_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

#define R600_PTE_VALID		(1 << 0)
#define R600_PTE_SYSTEM		(1 << 1)
#define R600_PTE_SNOOPED	(1 << 2)
#define R600_PTE_READABLE	(1 << 5)
#define R600_PTE_WRITEABLE	(1 << 6)

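/*
 * Write one R600-style 64-bit GART PTE. The page address is masked down to
 * a 4K boundary and OR'd with the flag bits above: a regular system page
 * becomes (addr & ~0xFFFULL) | SYSTEM | SNOOPED | VALID | READABLE |
 * WRITEABLE, while the dummy page is mapped without the VALID/READABLE/
 * WRITEABLE bits, presumably so stray accesses fault instead of scribbling
 * over memory.
 */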
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.ptr;

	if (i < 0 || i > rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = addr & 0xFFFFFFFFFFFFF000ULL;
	if (addr != rdev->dummy_page.addr)
		addr |= R600_PTE_VALID | R600_PTE_READABLE |
			R600_PTE_WRITEABLE;
	addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
	writeq(addr, ptr + (i * 8));
	return 0;
}

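/*
 * Program the interrupt masks from the rdev->irq state: the SW (fence) and
 * GUI idle interrupts in GEN_INT_CNTL, the per-crtc vblank interrupts in
 * DxMODE_INT_MASK, and the two hot-plug detect lines. Refuses to enable
 * anything while no IRQ handler is installed.
 */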
int rs600_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;
	uint32_t mode_int = 0;
	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
		tmp |= S_000040_SW_INT_EN(1);
	}
	if (rdev->irq.gui_idle) {
		tmp |= S_000040_GUI_IDLE(1);
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.hpd[0]) {
		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	}
	if (rdev->irq.hpd[1]) {
		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
	}
	WREG32(R_000040_GEN_INT_CNTL, tmp);
	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);

	/* posting read */
	RREG32(R_000040_GEN_INT_CNTL);

	return 0;
}

static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
	uint32_t irq_mask = S_000044_SW_INT(1);
	u32 tmp;

	/* the interrupt works, but the status bit is permanently asserted */
	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
		if (!rdev->irq.gui_idle_acked)
			irq_mask |= S_000044_GUI_IDLE_STAT(1);
	}

	if (G_000044_DISPLAY_INT_STAT(irqs)) {
		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006534_D1MODE_VBLANK_STATUS,
			       S_006534_D1MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
			       S_006D34_D2MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	} else {
		rdev->irq.stat_regs.r500.disp_int = 0;
	}

	if (irqs) {
		WREG32(R_000044_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

void rs600_irq_disable(struct radeon_device *rdev)
{
	WREG32(R_000040_GEN_INT_CNTL, 0);
	WREG32(R_006540_DxMODE_INT_MASK, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	rs600_irq_ack(rdev);
}

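/*
 * The interrupt handler proper: keep acknowledging and dispatching until no
 * source remains pending, servicing fence (SW) interrupts, GUI idle,
 * per-crtc vblank/pflip and hot-plug events, then rearm the MSI on chipsets
 * that need it before returning.
 */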
int rs600_irq_process(struct radeon_device *rdev)
{
	u32 status, msi_rearm;
	bool queue_hotplug = false;

	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;

	status = rs600_irq_ack(rdev);
	if (!status && !rdev->irq.stat_regs.r500.disp_int) {
		return IRQ_NONE;
	}
	while (status || rdev->irq.stat_regs.r500.disp_int) {
		/* SW interrupt */
		if (G_000044_SW_INT(status)) {
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
		}
		/* GUI idle */
		if (G_000040_GUI_IDLE(status)) {
			rdev->irq.gui_idle_acked = true;
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
		}
		/* Vertical blank interrupts */
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[0])
				radeon_crtc_handle_flip(rdev, 0);
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[1])
				radeon_crtc_handle_flip(rdev, 1);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = rs600_irq_ack(rdev);
	}
	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS600:
		case CHIP_RS690:
		case CHIP_RS740:
			msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
			WREG32(RADEON_BUS_CNTL, msi_rearm);
			WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
			break;
		default:
			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}

u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
	else
		return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
}

int rs600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
			return 0;
		udelay(1);
	}
	return -1;
}

void rs600_gpu_init(struct radeon_device *rdev)
{
	r420_pipes_init(rdev);
	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}

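/*
 * Fill in the memory controller description: aperture location and size,
 * DDR layout, VRAM size from CONFIG_MEMSIZE, sideport presence from the
 * AtomBIOS tables, and the VRAM/GTT placement derived from MC_FB_LOCATION.
 */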
void rs600_mc_init(struct radeon_device *rdev)
{
	u64 base;

	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	base = RREG32_MC(R_000004_MC_FB_LOCATION);
	base = G_000004_MC_FB_START(base) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

void rs600_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
	/* FIXME: implement full support */

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;

	rs690_line_buffer_adjust(rdev, mode0, mode1);

	if (rdev->disp_priority == 2) {
		d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
		d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
		d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
	}
}

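/*
 * The RS600 MC registers sit behind an index/data pair: the register offset
 * goes into MC_IND_INDEX (with CITF_ARB0 selecting the client, plus WR_EN
 * for writes) and the payload moves through MC_IND_DATA. These two helpers
 * are what the RREG32_MC()/WREG32_MC() accessors used above resolve to on
 * this ASIC.
 */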
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1));
	return RREG32(R_000074_MC_IND_DATA);
}

void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
	WREG32(R_000074_MC_IND_DATA, v);
}

void rs600_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}

void rs600_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
}

static void rs600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");

	/* FIXME: What does AGP mean for such a chipset? */
	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
	WREG32_MC(R_000006_AGP_BASE, 0);
	WREG32_MC(R_000007_AGP_BASE_2, 0);
	/* Program MC */
	WREG32_MC(R_000004_MC_FB_LOCATION,
		  S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
		  S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
	       S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

	rv515_mc_resume(rdev, &save);
}

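/*
 * Bring the ASIC up: program the MC, restart the clocks, configure the
 * pipes and the GART, then set up writeback, the fence ring, interrupts, a
 * 1MB CP ring buffer, audio and the IB pool, testing an IB before declaring
 * acceleration working.
 */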
static int rs600_startup(struct radeon_device *rdev)
{
	int r;

	rs600_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs600_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs600_gart_enable(rdev);
	if (r)
		return r;

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing audio\n");
		return r;
	}

	r = radeon_ib_pool_start(rdev);
	if (r)
		return r;

	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
		rdev->accel_working = false;
		return r;
	}

	return 0;
}

int rs600_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure GART is not working */
	rs600_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = rs600_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

int rs600_suspend(struct radeon_device *rdev)
{
	radeon_ib_pool_suspend(rdev);
	r600_audio_fini(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	rs600_gart_disable(rdev);
	return 0;
}

void rs600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	rs600_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int rs600_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	rv515_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some registers to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs600_mc_init(rdev);
	rs600_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs600_gart_init(rdev);
	if (r)
		return r;
	rs600_set_safe_registers(rdev);

	r = radeon_ib_pool_init(rdev);
	rdev->accel_working = true;
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		rdev->accel_working = false;
	}

	r = rs600_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		rs600_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}