/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376

static const u32 crtc_offsets[6] =
{
	EVERGREEN_CRTC0_REGISTER_OFFSET,
	EVERGREEN_CRTC1_REGISTER_OFFSET,
	EVERGREEN_CRTC2_REGISTER_OFFSET,
	EVERGREEN_CRTC3_REGISTER_OFFSET,
	EVERGREEN_CRTC4_REGISTER_OFFSET,
	EVERGREEN_CRTC5_REGISTER_OFFSET
};

static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
				     int ring, u32 cp_int_cntl);

void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
			     unsigned *bankh, unsigned *mtaspect,
			     unsigned *tile_split)
{
	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
	switch (*bankw) {
	default:
	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
	}
	switch (*bankh) {
	default:
	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
	}
	switch (*mtaspect) {
	default:
	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
	}
}
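
/*
 * Note on the switches above: they map the raw power-of-two values
 * (1/2/4/8) decoded from tiling_flags onto the hardware's enum encodings.
 * For example, tiling_flags carrying bankw=2 and bankh=4 come out as
 * EVERGREEN_ADDR_SURF_BANK_WIDTH_2 and EVERGREEN_ADDR_SURF_BANK_HEIGHT_4;
 * any unexpected value falls through "default" into the _1 encoding.
 */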

void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
	u16 ctl, v;
	int cap, err;

	cap = pci_pcie_cap(rdev->pdev);
	if (!cap)
		return;

	err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		return;

	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;

	/* if the BIOS or OS sets MAX_READ_REQUEST_SIZE to an invalid value,
	 * fix it to avoid hangs or performance issues
	 */
	if ((v == 0) || (v == 6) || (v == 7)) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= (2 << 12);
		pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
	}
}
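
/*
 * Background, assuming the standard PCIe encoding of the READRQ field
 * (bits 14:12 of the Device Control register): the max read request size
 * is 128 << v bytes, so writing 2 programs 512 bytes.  Values 6 and 7 are
 * reserved by the spec, and 0 (128 bytes), while legal PCIe, is treated
 * as invalid for this hardware as well.
 */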

void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	int i;

	if (crtc >= rdev->num_crtc)
		return;

	if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
				break;
			udelay(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
				break;
			udelay(1);
		}
	}
}
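
/*
 * The two polling loops above are deliberate: first wait until the CRTC
 * leaves any vblank period already in progress, then wait for the next
 * one to begin.  That guarantees the caller observes the start of a full
 * vblank interval rather than the tail end of the current one.
 */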

void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}
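
/*
 * Flip protocol in short: take GRPH_UPDATE_LOCK so the primary and
 * secondary surface addresses are reprogrammed atomically, wait for the
 * hardware to latch the new addresses (SURFACE_UPDATE_PENDING goes high),
 * then drop the lock so the double-buffered update completes on the next
 * vblank.  The caller uses the returned pending bit to know whether the
 * flip is still outstanding.
 */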

/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp, toffset;
	int actual_temp = 0;

	if (rdev->family == CHIP_JUNIPER) {
		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
			TOFFSET_SHIFT;
		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
			TS0_ADC_DOUT_SHIFT;

		if (toffset & 0x100)
			actual_temp = temp / 2 - (0x200 - toffset);
		else
			actual_temp = temp / 2 + toffset;

		actual_temp = actual_temp * 1000;

	} else {
		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
			ASIC_T_SHIFT;

		if (temp & 0x400)
			actual_temp = -256;
		else if (temp & 0x200)
			actual_temp = 255;
		else if (temp & 0x100) {
			actual_temp = temp & 0x1ff;
			actual_temp |= ~0x1ff;
		} else
			actual_temp = temp & 0xff;

		actual_temp = (actual_temp * 1000) / 2;
	}

	return actual_temp;
}
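
/*
 * Decoding notes for the non-Juniper path above: the sensor reports a
 * signed value in units of 0.5 degrees C.  Bit 8 set means the 9-bit
 * field is negative, so it is sign-extended with "actual_temp |= ~0x1ff";
 * e.g. a raw reading of 0x1f6 sign-extends to -10, giving -5000
 * millidegrees after the final "* 1000 / 2".  Readings flagged by bits
 * 10 or 9 are clamped to -256 or 255 half-degree units before scaling.
 */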

int sumo_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
	int actual_temp = temp - 49;

	return actual_temp * 1000;
}

void sumo_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	/* low,mid sh/mh */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);

	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

	/* high sh/mh */
	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;

	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;
}

void evergreen_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if (voltage->type == VOLTAGE_SW) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
		}
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->vddci == 0xff01)
			return;
		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
			rdev->pm.current_vddci = voltage->vddci;
			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
		}
	}
}

void evergreen_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

void evergreen_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = evergreen_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

void evergreen_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS: this avoids
			 * breaking the aux dp channel on imac and helps (but
			 * does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * it also avoids interrupt storms during dpms.
			 */
			continue;
		}
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			rdev->irq.hpd[1] = true;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			rdev->irq.hpd[2] = true;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			rdev->irq.hpd[3] = true;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			rdev->irq.hpd[4] = true;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			rdev->irq.hpd[5] = true;
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	if (rdev->irq.installed)
		evergreen_irq_set(rdev);
}

void evergreen_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			rdev->irq.hpd[1] = false;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			rdev->irq.hpd[2] = false;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			rdev->irq.hpd[3] = false;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			rdev->irq.hpd[4] = false;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			rdev->irq.hpd[5] = false;
			break;
		default:
			break;
		}
	}
}

/* watermark setup */

static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
					struct radeon_crtc *radeon_crtc,
					struct drm_display_mode *mode,
					struct drm_display_mode *other_mode)
{
	u32 tmp;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 2:0:
	 * first display controller
	 *  0 - first half of lb (3840 * 2)
	 *  1 - first 3/4 of lb (5760 * 2)
	 *  2 - whole lb (7680 * 2), other crtc must be disabled
	 *  3 - first 1/4 of lb (1920 * 2)
	 * second display controller
	 *  4 - second half of lb (3840 * 2)
	 *  5 - second 3/4 of lb (5760 * 2)
	 *  6 - whole lb (7680 * 2), other crtc must be disabled
	 *  7 - last 1/4 of lb (1920 * 2)
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs.  Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (radeon_crtc->base.enabled && mode) {
		if (other_mode)
			tmp = 0; /* 1/2 */
		else
			tmp = 2; /* whole */
	} else
		tmp = 0;

	/* second controller of the pair uses second half of the lb */
	if (radeon_crtc->crtc_id % 2)
		tmp += 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);

	if (radeon_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		case 4:
		default:
			if (ASIC_IS_DCE5(rdev))
				return 4096 * 2;
			else
				return 3840 * 2;
		case 1:
		case 5:
			if (ASIC_IS_DCE5(rdev))
				return 6144 * 2;
			else
				return 5760 * 2;
		case 2:
		case 6:
			if (ASIC_IS_DCE5(rdev))
				return 8192 * 2;
			else
				return 7680 * 2;
		case 3:
		case 7:
			if (ASIC_IS_DCE5(rdev))
				return 2048 * 2;
			else
				return 1920 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
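
/*
 * Example: with both crtcs of a pair driving displays, each gets half the
 * line buffer, so on DCE4 the function programs split mode 0 (or 4 for
 * the second crtc of the pair) and reports 3840 * 2 entries back to the
 * watermark code.  A single enabled crtc on the pair gets the whole
 * buffer (mode 2/6, 7680 * 2 entries).  DCE5 parts have slightly larger
 * line buffers, hence the 4096/6144/8192/2048 variants.
 */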

u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	}
}

struct evergreen_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
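
/*
 * In plain terms: dram bandwidth (MB/s) = channels * 4 bytes per channel
 * per clock * (yclk kHz / 1000) * 0.7 efficiency, all computed in 20.12
 * fixed point.  E.g. 2 channels at yclk = 1,000,000 kHz yields
 * 2 * 4 * 1000 * 0.7 = 5600 MB/s available to the whole chip.
 */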

static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, disp_clk);
	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the Available bandwidth.  Display can use this temporarily but not on average. */
	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = evergreen_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
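
/*
 * The watermark returned above is the total latency the line buffer must
 * hide, in ns: a fixed 2000 ns memory-controller latency, plus the time
 * for the other heads' worst-case chunk (512 * 8 bytes) and cursor line
 * pair (128 * 4 bytes) requests to drain at the available bandwidth, plus
 * the display pipe latency derived from disp_clk.  If a scaled source
 * line takes longer to fetch than the active display time, the shortfall
 * is added on top.
 */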

static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (evergreen_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

static void evergreen_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct evergreen_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 pipe_offset = radeon_crtc->crtc_id * 16;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !evergreen_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
}
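
/*
 * Unit check for the priority mark math above: latency_watermark (ns)
 * times mode->clock / 1000 (pixels per us) gives, after the second /1000,
 * the number of pixels fetched during the latency window; scaling by hsc
 * and dividing by 16 converts that to the 16-pixel units PRIORITY_*_CNT
 * expects.  E.g. a 4000 ns watermark at a 148500 kHz pixel clock (hsc = 1)
 * covers about 594 pixels, i.e. a mark of ~37.
 */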

void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i += 2) {
		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read SRBM_STATUS and check the MC busy bits */
		tmp = RREG32(SRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

/*
 * GART
 */
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read the response */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
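
/*
 * Flush sequencing above: the HDP write first pushes any pending host
 * writes out to memory, then REQUEST_TYPE(1) asks the VM block to
 * invalidate the context 0 TLB.  The loop polls RESPONSE_TYPE; per the
 * handling here, a response of 2 is the hardware's failure indication
 * (hence the warning), any other non-zero response means the flush
 * completed, and zero means keep waiting.
 */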

int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	if (rdev->flags & RADEON_IS_IGP) {
		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
	} else {
		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
		if ((rdev->family == CHIP_JUNIPER) ||
		    (rdev->family == CHIP_CYPRESS) ||
		    (rdev->family == CHIP_HEMLOCK) ||
		    (rdev->family == CHIP_BARTS))
			WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
	}
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL, 0);

	evergreen_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
	evergreen_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}


void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
}

void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	u32 crtc_enabled, tmp, frame_count, blackout;
	int i, j;

	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);

	/* disable VGA render */
	WREG32(VGA_RENDER_CONTROL, 0);
	/* blank the display controllers */
	for (i = 0; i < rdev->num_crtc; i++) {
		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
		if (crtc_enabled) {
			save->crtc_enabled[i] = true;
			if (ASIC_IS_DCE6(rdev)) {
				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
				if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
					radeon_wait_for_vblank(rdev, i);
					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				}
			} else {
				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
				if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
					radeon_wait_for_vblank(rdev, i);
					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
					tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
					WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
				}
			}
			/* wait for the next frame */
			frame_count = radeon_get_vblank_counter(rdev, i);
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (radeon_get_vblank_counter(rdev, i) != frame_count)
					break;
				udelay(1);
			}

			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
			tmp &= ~EVERGREEN_CRTC_MASTER_EN;
			WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
			/* ***** */
		} else {
			save->crtc_enabled[i] = false;
		}
	}

	radeon_mc_wait_for_idle(rdev);

	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
	if ((blackout & BLACKOUT_MODE_MASK) != 1) {
		/* Block CPU access */
		WREG32(BIF_FB_EN, 0);
		/* blackout the MC */
		blackout &= ~BLACKOUT_MODE_MASK;
		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);

	/* lock double buffered regs */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
			if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
				tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (!(tmp & 1)) {
				tmp |= 1;
				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
		}
	}
}
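
/*
 * Shutdown order here matters: blank/disable every active CRTC first so
 * nothing is fetching from VRAM, wait a frame for that to take effect,
 * then put the memory controller into blackout mode (BLACKOUT_MODE 1)
 * and cut CPU framebuffer access via BIF_FB_EN before the caller
 * reprograms the MC.  The double-buffered display registers are locked
 * last so the saved state cannot be half-updated while the MC moves.
 */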

void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	u32 tmp, frame_count;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < rdev->num_crtc; i++) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)rdev->mc.vram_start);
	}
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);

	/* unlock regs and wait for update */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
			if ((tmp & 0x3) != 0) {
				tmp &= ~0x3;
				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
			if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
				tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (tmp & 1) {
				tmp &= ~1;
				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			for (j = 0; j < rdev->usec_timeout; j++) {
				tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
				if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
					break;
				udelay(1);
			}
		}
	}

	/* unblackout the MC */
	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
	tmp &= ~BLACKOUT_MODE_MASK;
	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			if (ASIC_IS_DCE6(rdev)) {
				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			} else {
				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
				tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = radeon_get_vblank_counter(rdev, i);
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (radeon_get_vblank_counter(rdev, i) != frame_count)
					break;
				udelay(1);
			}
		}
	}
	/* Unlock vga access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}

void evergreen_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			       rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			       rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			       rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			       rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
		       rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		       rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	/* llano/ontario only */
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2)) {
		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
	}
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/*
 * CP.
 */
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);
	/* FIXME: implement */
	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw);
}


static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r700_cp_stop(rdev);
	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
1437
static int evergreen_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
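	/* ME_INITIALIZE brings up the micro engine; among other things it
	 * reports the number of hardware contexts (minus one) and a device
	 * ID to the CP */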
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cp_me = 0xff;
	WREG32(CP_ME_CNTL, cp_me);

	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < evergreen_default_size; i++)
		radeon_ring_write(ring, evergreen_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /*  */

	radeon_ring_unlock_commit(rdev, ring);

	return 0;
}

int evergreen_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	/* Set ring buffer size */
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

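	/* rewrite CP_RB_CNTL without RB_RPTR_WR_ENA so the CP-maintained
	 * read pointer takes over from the value forced above */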
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	ring->rptr = RREG32(CP_RB_RPTR);

	evergreen_cp_start(rdev);
	ring->ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}
	return 0;
}

/*
 * Core functions
 */
static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
						  u32 num_tile_pipes,
						  u32 num_backends,
						  u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 cur_pipe;
	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

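	/* clamp the pipe/backend counts, gather the mask of enabled render
	 * backends, then assign backends to pipes round-robin in (optionally
	 * swizzled) pipe order: one backend nibble per pipe in the result */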
	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
		num_tile_pipes = EVERGREEN_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > EVERGREEN_MAX_BACKENDS)
		num_backends = EVERGREEN_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		force_no_swizzle = false;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_BARTS:
	default:
		force_no_swizzle = true;
		break;
	}
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
	}

	return backend_map;
}

static void evergreen_gpu_init(struct radeon_device *rdev)
{
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config;
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 gb_backend_map;
	u32 grbm_gfx_index;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 sq_config;
	u32 sq_lds_resource_mgmt;
	u32 sq_gpr_resource_mgmt_1;
	u32 sq_gpr_resource_mgmt_2;
	u32 sq_gpr_resource_mgmt_3;
	u32 sq_thread_resource_mgmt;
	u32 sq_thread_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_1;
	u32 sq_stack_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_3;
	u32 vgt_cache_invalidation;
	u32 hdp_host_path_cntl, tmp;
	int i, j, num_shader_engines, ps_thread_count;

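	/* per-family shader core topology: engine/pipe/SIMD/backend counts
	 * and SX/SC FIFO sizing for each ASIC */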
	switch (rdev->family) {
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_JUNIPER:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_REDWOOD:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_CEDAR:
	default:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_PALM:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_SUMO:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 2;
		if (rdev->pdev->device == 0x9648)
			rdev->config.evergreen.max_simds = 3;
		else if ((rdev->pdev->device == 0x9647) ||
			 (rdev->pdev->device == 0x964a))
			rdev->config.evergreen.max_simds = 4;
		else
			rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_SUMO2:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_BARTS:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 7;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_TURKS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 6;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_CAICOS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;

	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
				  & EVERGREEN_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
			       & EVERGREEN_MAX_SIMDS_MASK);

	cc_rb_backend_disable =
		BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
				& EVERGREEN_MAX_BACKENDS_MASK);


	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2))
		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
	else
		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

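	/* NUM_PIPES holds the log2 of the tile pipe count */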
	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		gb_addr_config |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_config |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_config |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_config |= NUM_PIPES(3);
		break;
	}

	gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
	gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
	gb_addr_config |= MULTI_GPU_TILE_SIZE(2);

	if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
		gb_addr_config |= ROW_SIZE(2);
	else
		gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);

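	/* these SKUs (0x689e Cypress, 0x68b9 Juniper) may have render
	 * backends fused off; read the efuse straps to pick a backend map
	 * matching the harvested configuration */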
	if (rdev->ddev->pdev->device == 0x689e) {
		u32 efuse_straps_4;
		u32 efuse_straps_3;
		u8 efuse_box_bit_131_124;

		WREG32(RCU_IND_INDEX, 0x204);
		efuse_straps_4 = RREG32(RCU_IND_DATA);
		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));

		switch(efuse_box_bit_131_124) {
		case 0x00:
			gb_backend_map = 0x76543210;
			break;
		case 0x55:
			gb_backend_map = 0x77553311;
			break;
		case 0x56:
			gb_backend_map = 0x77553300;
			break;
		case 0x59:
			gb_backend_map = 0x77552211;
			break;
		case 0x66:
			gb_backend_map = 0x77443300;
			break;
		case 0x99:
			gb_backend_map = 0x66552211;
			break;
		case 0x5a:
			gb_backend_map = 0x77552200;
			break;
		case 0xaa:
			gb_backend_map = 0x66442200;
			break;
		case 0x95:
			gb_backend_map = 0x66553311;
			break;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else if (rdev->ddev->pdev->device == 0x68b9) {
		u32 efuse_straps_3;
		u8 efuse_box_bit_127_124;

		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);

		switch(efuse_box_bit_127_124) {
		case 0x0:
			gb_backend_map = 0x00003210;
			break;
		case 0x5:
		case 0x6:
		case 0x9:
		case 0xa:
			gb_backend_map = 0x00003311;
			break;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else {
		switch (rdev->family) {
		case CHIP_CYPRESS:
		case CHIP_HEMLOCK:
		case CHIP_BARTS:
			gb_backend_map = 0x66442200;
			break;
		case CHIP_JUNIPER:
			gb_backend_map = 0x00002200;
			break;
		default:
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
		}
	}

	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.evergreen.tile_config = 0;
	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		rdev->config.evergreen.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.evergreen.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.evergreen.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.evergreen.tile_config |= (3 << 0);
		break;
	}
	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.evergreen.tile_config |= 1 << 4;
	else {
		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
		case 0: /* four banks */
			rdev->config.evergreen.tile_config |= 0 << 4;
			break;
		case 1: /* eight banks */
			rdev->config.evergreen.tile_config |= 1 << 4;
			break;
		case 2: /* sixteen banks */
		default:
			rdev->config.evergreen.tile_config |= 2 << 4;
			break;
		}
	}
	rdev->config.evergreen.tile_config |=
		((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
	rdev->config.evergreen.tile_config |=
		((gb_addr_config & 0x30000000) >> 28) << 12;

	rdev->config.evergreen.backend_map = gb_backend_map;
	WREG32(GB_BACKEND_MAP, gb_backend_map);
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;

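	/* select each shader engine in turn via GRBM_GFX_INDEX and program
	 * its backend/SIMD disable masks; broadcast mode is restored after
	 * the loop */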
	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
		u32 rb = cc_rb_backend_disable | (0xf0 << 16);
		u32 sp = cc_gc_shader_pipe_config;
		u32 gfx = grbm_gfx_index | SE_INDEX(i);

		if (i == num_shader_engines) {
			rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
			sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
		}

		WREG32(GRBM_GFX_INDEX, gfx);
		WREG32(RLC_GFX_INDEX, gfx);

		WREG32(CC_RB_BACKEND_DISABLE, rb);
		WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
		WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
		WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
	}

	grbm_gfx_index = INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES;
	WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
	WREG32(RLC_GFX_INDEX, grbm_gfx_index);

	WREG32(CGTS_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_TCC_DISABLE, 0);

	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));

	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
			     SYNC_GRADIENT |
			     SYNC_WALKER |
			     SYNC_ALIGNER));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);


	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	if (rdev->family <= CHIP_SUMO2)
		WREG32(SMX_SAR_CTL0, 0x00010000);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);
	WREG32(SPI_CONFIG_CNTL, 0);
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (VC_ENABLE |
		      EXPORT_SRC_C |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;
		break;
	default:
		break;
	}

	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);

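	/* split the GPR file (minus the 4 * 2 clause temporaries) between
	 * the stages in a 12:6:4:4:3:3 (PS:VS:GS:ES:HS:LS) ratio, 32/32nds
	 * in total */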
	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
		ps_thread_count = 96;
		break;
	default:
		ps_thread_count = 128;
		break;
	}

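	/* PS keeps a fixed thread budget; each other stage gets
	 * (max_threads - ps_thread_count) / 6, rounded down to a multiple
	 * of 8 */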
	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);

	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
		break;
	default:
		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
		break;
	}
	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
	WREG32(VGT_OUT_DEALLOC_CNTL, 16);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	/* clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR8_BASE, 0);
	WREG32(CB_COLOR9_BASE, 0);
	WREG32(CB_COLOR10_BASE, 0);
	WREG32(CB_COLOR11_BASE, 0);

	/* set the shader const cache sizes to 0 */
	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
		WREG32(i, 0);
	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
		WREG32(i, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);

}

int evergreen_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2))
		tmp = RREG32(FUS_MC_ARB_RAMCFG);
	else
		tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
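	/* NOOFCHAN encodes 1/2/4/8 memory channels; the effective memory bus
	 * width is channels * channel size */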
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could the aperture size report 0? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2)) {
		/* size in bytes on fusion */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	} else {
		/* size in MB on evergreen/cayman/tn */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	}
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r700_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}

bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status_se0, grbm_status_se1;
	struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
	int r;

	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		r100_gpu_lockup_update(lockup, ring);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, ring, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(ring, 0x80000000);
		radeon_ring_write(ring, 0x80000000);
		radeon_ring_unlock_commit(rdev, ring);
	}
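	/* re-sample the hardware read pointer; r100_gpu_cp_is_lockup()
	 * presumably treats an rptr that has not advanced since the last
	 * check as a CP lockup */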
	ring->rptr = RREG32(CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}

static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU soft reset\n");
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VC |
		      SOFT_RESET_VGT);

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}

int evergreen_asic_reset(struct radeon_device *rdev)
{
	return evergreen_gpu_soft_reset(rdev);
}

/* Interrupts */

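/* each CRTC exposes a free-running frame counter at a fixed per-CRTC
 * register offset; this is what the DRM vblank counter reads back */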
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	switch (crtc) {
	case 0:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
	case 1:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
	case 2:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
	case 3:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
	case 4:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
	case 5:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
	default:
		return 0;
	}
}

void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	if (rdev->family >= CHIP_CAYMAN) {
		cayman_cp_int_cntl_setup(rdev, 0,
					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
		cayman_cp_int_cntl_setup(rdev, 1, 0);
		cayman_cp_int_cntl_setup(rdev, 2, 0);
	} else
		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	/* only one DAC on DCE6 */
	if (!ASIC_IS_DCE6(rdev))
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);

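	/* keep only the hot-plug polarity bits so the HPD interrupt enables
	 * are masked while the polarity setup survives */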
	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD1_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD2_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD3_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD4_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD5_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD6_INT_CONTROL, tmp);

}

int evergreen_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
	u32 grbm_int_cntl = 0;
	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		evergreen_disable_interrupt_state(rdev);
		return 0;
	}

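	/* start from the current HPD control values with the enable bit
	 * cleared; the per-connector enables are ORed back in below */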
	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;

	if (rdev->family >= CHIP_CAYMAN) {
		/* enable CP interrupts on all rings */
		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
		}
		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
		}
		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
		}
	} else {
		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
			cp_int_cntl |= RB_INT_ENABLE;
			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
		}
	}

	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
		crtc1 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
		crtc2 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[2] ||
	    rdev->irq.pflip[2]) {
		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
		crtc3 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[3] ||
	    rdev->irq.pflip[3]) {
		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
		crtc4 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[4] ||
	    rdev->irq.pflip[4]) {
		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
		crtc5 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[5] ||
	    rdev->irq.pflip[5]) {
		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
		crtc6 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("evergreen_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("evergreen_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("evergreen_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("evergreen_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("evergreen_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

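	/* commit the accumulated enable masks to the hardware */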
	if (rdev->family >= CHIP_CAYMAN) {
		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
		cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
	} else
		WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);

	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
	}

	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
	}

	WREG32(DC_HPD1_INT_CONTROL, hpd1);
	WREG32(DC_HPD2_INT_CONTROL, hpd2);
	WREG32(DC_HPD3_INT_CONTROL, hpd3);
	WREG32(DC_HPD4_INT_CONTROL, hpd4);
	WREG32(DC_HPD5_INT_CONTROL, hpd5);
	WREG32(DC_HPD6_INT_CONTROL, hpd6);

	return 0;
}

static void evergreen_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

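	/* latch all of the display interrupt status registers once, then
	 * acknowledge each source that is flagged as asserted */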
	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
	if (rdev->num_crtc >= 4) {
		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
	}
	if (rdev->num_crtc >= 6) {
		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
	}

	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);

	if (rdev->num_crtc >= 4) {
		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
	}

	if (rdev->num_crtc >= 6) {
2822 if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
2823 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2824 if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
2825 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2826 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
2827 WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
2828 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
2829 WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
2830 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
2831 WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
2832 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
2833 WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
2834 }
Alex Deucher45f9a392010-03-24 13:55:51 -04002835
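	/* ack any hot plug detect interrupts that were latched above */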
Alex Deucher6f34be52010-11-21 10:59:01 -05002836 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04002837 tmp = RREG32(DC_HPD1_INT_CONTROL);
2838 tmp |= DC_HPDx_INT_ACK;
2839 WREG32(DC_HPD1_INT_CONTROL, tmp);
2840 }
Alex Deucher6f34be52010-11-21 10:59:01 -05002841 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04002842 tmp = RREG32(DC_HPD2_INT_CONTROL);
2843 tmp |= DC_HPDx_INT_ACK;
2844 WREG32(DC_HPD2_INT_CONTROL, tmp);
2845 }
Alex Deucher6f34be52010-11-21 10:59:01 -05002846 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04002847 tmp = RREG32(DC_HPD3_INT_CONTROL);
2848 tmp |= DC_HPDx_INT_ACK;
2849 WREG32(DC_HPD3_INT_CONTROL, tmp);
2850 }
Alex Deucher6f34be52010-11-21 10:59:01 -05002851 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04002852 tmp = RREG32(DC_HPD4_INT_CONTROL);
2853 tmp |= DC_HPDx_INT_ACK;
2854 WREG32(DC_HPD4_INT_CONTROL, tmp);
2855 }
Alex Deucher6f34be52010-11-21 10:59:01 -05002856 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04002857 tmp = RREG32(DC_HPD5_INT_CONTROL);
2858 tmp |= DC_HPDx_INT_ACK;
2859 WREG32(DC_HPD5_INT_CONTROL, tmp);
2860 }
Alex Deucher6f34be52010-11-21 10:59:01 -05002861 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04002862 tmp = RREG32(DC_HPD6_INT_CONTROL);
2863 tmp |= DC_HPDx_INT_ACK;
2864 WREG32(DC_HPD6_INT_CONTROL, tmp);
2865 }
2866}
2867
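/* evergreen_irq_disable - mask off every interrupt source.
 *
 * Disables the IH, waits 1 ms for in-flight interrupts to land, acks
 * whatever latched in the meantime, then clears the per-source enable
 * state programmed by evergreen_irq_set().
 */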
2868void evergreen_irq_disable(struct radeon_device *rdev)
2869{
Alex Deucher45f9a392010-03-24 13:55:51 -04002870 r600_disable_interrupts(rdev);
2871 /* Wait and acknowledge irq */
2872 mdelay(1);
Alex Deucher6f34be52010-11-21 10:59:01 -05002873 evergreen_irq_ack(rdev);
Alex Deucher45f9a392010-03-24 13:55:51 -04002874 evergreen_disable_interrupt_state(rdev);
2875}
2876
Alex Deucher755d8192011-03-02 20:07:34 -05002877void evergreen_irq_suspend(struct radeon_device *rdev)
Alex Deucher45f9a392010-03-24 13:55:51 -04002878{
2879 evergreen_irq_disable(rdev);
2880 r600_rlc_stop(rdev);
2881}
2882
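/* evergreen_get_ih_wptr - fetch the IH ring write pointer.
 *
 * Uses the CPU-visible write-back copy when write-back is enabled,
 * avoiding an MMIO read; otherwise reads IH_RB_WPTR. On overflow the
 * read pointer is pushed past the oldest surviving vector (wptr + 16)
 * and the overflow flag is cleared via IH_WPTR_OVERFLOW_CLEAR.
 */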
Andi Kleencbdd4502011-10-13 16:08:46 -07002883static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
Alex Deucher45f9a392010-03-24 13:55:51 -04002884{
2885 u32 wptr, tmp;
2886
Alex Deucher724c80e2010-08-27 18:25:25 -04002887 if (rdev->wb.enabled)
Cédric Cano204ae242011-04-19 11:07:13 -04002888 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
Alex Deucher724c80e2010-08-27 18:25:25 -04002889 else
2890 wptr = RREG32(IH_RB_WPTR);
Alex Deucher45f9a392010-03-24 13:55:51 -04002891
2892 if (wptr & RB_OVERFLOW) {
2893 /* When a ring buffer overflow happens, start parsing interrupts
2894 * from the last vector not overwritten (wptr + 16). Hopefully
2895 * this should allow us to catch up.
2896 */
2897 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
2898 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
2899 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
2900 tmp = RREG32(IH_RB_CNTL);
2901 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2902 WREG32(IH_RB_CNTL, tmp);
2903 }
2904 return (wptr & rdev->ih.ptr_mask);
2905}
2906
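/* evergreen_irq_process - drain the IH ring.
 *
 * Each IH vector is 16 bytes (four dwords); only the first two are
 * decoded here:
 *   dword 0, bits 7:0  - src_id   (interrupt source)
 *   dword 1, bits 27:0 - src_data (source specific payload)
 *   dwords 2-3         - not used by this handler
 * The loop re-reads wptr after draining in case more vectors arrived
 * while processing, and restarts if so.
 */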
2907int evergreen_irq_process(struct radeon_device *rdev)
2908{
Dave Airlie682f1a52011-06-18 03:59:51 +00002909 u32 wptr;
2910 u32 rptr;
Alex Deucher45f9a392010-03-24 13:55:51 -04002911 u32 src_id, src_data;
2912 u32 ring_index;
Alex Deucher45f9a392010-03-24 13:55:51 -04002913 unsigned long flags;
2914 bool queue_hotplug = false;
2915
Dave Airlie682f1a52011-06-18 03:59:51 +00002916 if (!rdev->ih.enabled || rdev->shutdown)
Alex Deucher45f9a392010-03-24 13:55:51 -04002917 return IRQ_NONE;
2918
Dave Airlie682f1a52011-06-18 03:59:51 +00002919 wptr = evergreen_get_ih_wptr(rdev);
2920 rptr = rdev->ih.rptr;
2921 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
Alex Deucher45f9a392010-03-24 13:55:51 -04002922
Dave Airlie682f1a52011-06-18 03:59:51 +00002923 spin_lock_irqsave(&rdev->ih.lock, flags);
Alex Deucher45f9a392010-03-24 13:55:51 -04002924 if (rptr == wptr) {
2925 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2926 return IRQ_NONE;
2927 }
Alex Deucher45f9a392010-03-24 13:55:51 -04002928restart_ih:
Benjamin Herrenschmidt964f6642011-07-13 16:28:19 +10002929 /* Order reading of wptr vs. reading of IH ring data */
2930 rmb();
2931
Alex Deucher45f9a392010-03-24 13:55:51 -04002932 /* display interrupts */
Alex Deucher6f34be52010-11-21 10:59:01 -05002933 evergreen_irq_ack(rdev);
Alex Deucher45f9a392010-03-24 13:55:51 -04002934
2935 rdev->ih.wptr = wptr;
2936 while (rptr != wptr) {
2937 /* wptr/rptr are in bytes! */
2938 ring_index = rptr / 4;
Alex Deucher0f234f52011-02-13 19:06:33 -05002939 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
2940 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
Alex Deucher45f9a392010-03-24 13:55:51 -04002941
2942 switch (src_id) {
2943 case 1: /* D1 vblank/vline */
2944 switch (src_data) {
2945 case 0: /* D1 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05002946 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
Alex Deucher6f34be52010-11-21 10:59:01 -05002947 if (rdev->irq.crtc_vblank_int[0]) {
2948 drm_handle_vblank(rdev->ddev, 0);
2949 rdev->pm.vblank_sync = true;
2950 wake_up(&rdev->irq.vblank_queue);
2951 }
Mario Kleiner3e4ea742010-11-21 10:59:02 -05002952 if (rdev->irq.pflip[0])
2953 radeon_crtc_handle_flip(rdev, 0);
Alex Deucher6f34be52010-11-21 10:59:01 -05002954 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04002955 DRM_DEBUG("IH: D1 vblank\n");
2956 }
2957 break;
2958 case 1: /* D1 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05002959 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
2960 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04002961 DRM_DEBUG("IH: D1 vline\n");
2962 }
2963 break;
2964 default:
2965 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2966 break;
2967 }
2968 break;
2969 case 2: /* D2 vblank/vline */
2970 switch (src_data) {
2971 case 0: /* D2 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05002972 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
Alex Deucher6f34be52010-11-21 10:59:01 -05002973 if (rdev->irq.crtc_vblank_int[1]) {
2974 drm_handle_vblank(rdev->ddev, 1);
2975 rdev->pm.vblank_sync = true;
2976 wake_up(&rdev->irq.vblank_queue);
2977 }
Mario Kleiner3e4ea742010-11-21 10:59:02 -05002978 if (rdev->irq.pflip[1])
2979 radeon_crtc_handle_flip(rdev, 1);
Alex Deucher6f34be52010-11-21 10:59:01 -05002980 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04002981 DRM_DEBUG("IH: D2 vblank\n");
2982 }
2983 break;
2984 case 1: /* D2 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05002985 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
2986 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04002987 DRM_DEBUG("IH: D2 vline\n");
2988 }
2989 break;
2990 default:
2991 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2992 break;
2993 }
2994 break;
2995 case 3: /* D3 vblank/vline */
2996 switch (src_data) {
2997 case 0: /* D3 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05002998 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
2999 if (rdev->irq.crtc_vblank_int[2]) {
3000 drm_handle_vblank(rdev->ddev, 2);
3001 rdev->pm.vblank_sync = true;
3002 wake_up(&rdev->irq.vblank_queue);
3003 }
3004 if (rdev->irq.pflip[2])
3005 radeon_crtc_handle_flip(rdev, 2);
3006 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003007 DRM_DEBUG("IH: D3 vblank\n");
3008 }
3009 break;
3010 case 1: /* D3 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003011 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
3012 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003013 DRM_DEBUG("IH: D3 vline\n");
3014 }
3015 break;
3016 default:
3017 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3018 break;
3019 }
3020 break;
3021 case 4: /* D4 vblank/vline */
3022 switch (src_data) {
3023 case 0: /* D4 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003024 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
3025 if (rdev->irq.crtc_vblank_int[3]) {
3026 drm_handle_vblank(rdev->ddev, 3);
3027 rdev->pm.vblank_sync = true;
3028 wake_up(&rdev->irq.vblank_queue);
3029 }
3030 if (rdev->irq.pflip[3])
3031 radeon_crtc_handle_flip(rdev, 3);
3032 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003033 DRM_DEBUG("IH: D4 vblank\n");
3034 }
3035 break;
3036 case 1: /* D4 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003037 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
3038 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003039 DRM_DEBUG("IH: D4 vline\n");
3040 }
3041 break;
3042 default:
3043 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3044 break;
3045 }
3046 break;
3047 case 5: /* D5 vblank/vline */
3048 switch (src_data) {
3049 case 0: /* D5 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003050 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
3051 if (rdev->irq.crtc_vblank_int[4]) {
3052 drm_handle_vblank(rdev->ddev, 4);
3053 rdev->pm.vblank_sync = true;
3054 wake_up(&rdev->irq.vblank_queue);
3055 }
3056 if (rdev->irq.pflip[4])
3057 radeon_crtc_handle_flip(rdev, 4);
3058 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003059 DRM_DEBUG("IH: D5 vblank\n");
3060 }
3061 break;
3062 case 1: /* D5 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003063 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
3064 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003065 DRM_DEBUG("IH: D5 vline\n");
3066 }
3067 break;
3068 default:
3069 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3070 break;
3071 }
3072 break;
3073 case 6: /* D6 vblank/vline */
3074 switch (src_data) {
3075 case 0: /* D6 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003076 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
3077 if (rdev->irq.crtc_vblank_int[5]) {
3078 drm_handle_vblank(rdev->ddev, 5);
3079 rdev->pm.vblank_sync = true;
3080 wake_up(&rdev->irq.vblank_queue);
3081 }
3082 if (rdev->irq.pflip[5])
3083 radeon_crtc_handle_flip(rdev, 5);
3084 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003085 DRM_DEBUG("IH: D6 vblank\n");
3086 }
3087 break;
3088 case 1: /* D6 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003089 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
3090 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003091 DRM_DEBUG("IH: D6 vline\n");
3092 }
3093 break;
3094 default:
3095 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3096 break;
3097 }
3098 break;
3099 case 42: /* HPD hotplug */
3100 switch (src_data) {
3101 case 0:
Alex Deucher6f34be52010-11-21 10:59:01 -05003102 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
3103 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003104 queue_hotplug = true;
3105 DRM_DEBUG("IH: HPD1\n");
3106 }
3107 break;
3108 case 1:
Alex Deucher6f34be52010-11-21 10:59:01 -05003109 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
3110 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003111 queue_hotplug = true;
3112 DRM_DEBUG("IH: HPD2\n");
3113 }
3114 break;
3115 case 2:
Alex Deucher6f34be52010-11-21 10:59:01 -05003116 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
3117 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003118 queue_hotplug = true;
3119 DRM_DEBUG("IH: HPD3\n");
3120 }
3121 break;
3122 case 3:
Alex Deucher6f34be52010-11-21 10:59:01 -05003123 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
3124 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003125 queue_hotplug = true;
3126 DRM_DEBUG("IH: HPD4\n");
3127 }
3128 break;
3129 case 4:
Alex Deucher6f34be52010-11-21 10:59:01 -05003130 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
3131 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003132 queue_hotplug = true;
3133 DRM_DEBUG("IH: HPD5\n");
3134 }
3135 break;
3136 case 5:
Alex Deucher6f34be52010-11-21 10:59:01 -05003137 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
3138 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003139 queue_hotplug = true;
3140 DRM_DEBUG("IH: HPD6\n");
3141 }
3142 break;
3143 default:
3144 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3145 break;
3146 }
3147 break;
3148 case 176: /* CP_INT in ring buffer */
3149 case 177: /* CP_INT in IB1 */
3150 case 178: /* CP_INT in IB2 */
3151 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
Alex Deucher74652802011-08-25 13:39:48 -04003152 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
Alex Deucher45f9a392010-03-24 13:55:51 -04003153 break;
3154 case 181: /* CP EOP event */
3155 DRM_DEBUG("IH: CP EOP\n");
Alex Deucher1b370782011-11-17 20:13:28 -05003156 if (rdev->family >= CHIP_CAYMAN) {
3157 switch (src_data) {
3158 case 0:
3159 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3160 break;
3161 case 1:
3162 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
3163 break;
3164 case 2:
3165 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
3166 break;
3167 }
3168 } else
3169 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
Alex Deucher45f9a392010-03-24 13:55:51 -04003170 break;
Alex Deucher2031f772010-04-22 12:52:11 -04003171 case 233: /* GUI IDLE */
Ilija Hadzic303c8052011-06-07 14:54:48 -04003172 DRM_DEBUG("IH: GUI idle\n");
Alex Deucher2031f772010-04-22 12:52:11 -04003173 rdev->pm.gui_idle = true;
3174 wake_up(&rdev->irq.idle_queue);
3175 break;
Alex Deucher45f9a392010-03-24 13:55:51 -04003176 default:
3177 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3178 break;
3179 }
3180
3181 /* wptr/rptr are in bytes! */
3182 rptr += 16;
3183 rptr &= rdev->ih.ptr_mask;
3184 }
3185 /* make sure wptr hasn't changed while processing */
3186 wptr = evergreen_get_ih_wptr(rdev);
3187 if (wptr != rdev->ih.wptr)
3188 goto restart_ih;
3189 if (queue_hotplug)
Tejun Heo32c87fc2011-01-03 14:49:32 +01003190 schedule_work(&rdev->hotplug_work);
Alex Deucher45f9a392010-03-24 13:55:51 -04003191 rdev->ih.rptr = rptr;
3192 WREG32(IH_RB_RPTR, rdev->ih.rptr);
3193 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3194 return IRQ_HANDLED;
3195}
3196
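/* evergreen_startup - one-stop hardware bring-up; ordering matters:
 * pcie gen2 enable -> CP/RLC (plus MC on DCE5) microcode -> vram
 * scratch -> MC programming -> AGP or GART enable -> gpu_init ->
 * blitter (optional, falls back to memcpy) -> write-back buffer ->
 * fence ring -> IH init and irq set -> CP ring init, microcode and
 * resume -> IB pool start and IB test -> audio.
 */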
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003197static int evergreen_startup(struct radeon_device *rdev)
3198{
Christian Könige32eb502011-10-23 12:56:27 +02003199 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003200 int r;
3201
Alex Deucher9e46a482011-01-06 18:49:35 -05003202 /* enable pcie gen2 link */
Ilija Hadziccd540332011-09-20 10:22:57 -04003203 evergreen_pcie_gen2_enable(rdev);
Alex Deucher9e46a482011-01-06 18:49:35 -05003204
Alex Deucher0af62b02011-01-06 21:19:31 -05003205 if (ASIC_IS_DCE5(rdev)) {
3206 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
3207 r = ni_init_microcode(rdev);
3208 if (r) {
3209 DRM_ERROR("Failed to load firmware!\n");
3210 return r;
3211 }
3212 }
Alex Deucher755d8192011-03-02 20:07:34 -05003213 r = ni_mc_load_microcode(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003214 if (r) {
Alex Deucher0af62b02011-01-06 21:19:31 -05003215 DRM_ERROR("Failed to load MC firmware!\n");
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003216 return r;
3217 }
Alex Deucher0af62b02011-01-06 21:19:31 -05003218 } else {
3219 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
3220 r = r600_init_microcode(rdev);
3221 if (r) {
3222 DRM_ERROR("Failed to load firmware!\n");
3223 return r;
3224 }
3225 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003226 }
Alex Deucherfe251e22010-03-24 13:36:43 -04003227
Alex Deucher16cdf042011-10-28 10:30:02 -04003228 r = r600_vram_scratch_init(rdev);
3229 if (r)
3230 return r;
3231
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003232 evergreen_mc_program(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003233 if (rdev->flags & RADEON_IS_AGP) {
Alex Deucher0fcdb612010-03-24 13:20:41 -04003234 evergreen_agp_enable(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003235 } else {
3236 r = evergreen_pcie_gart_enable(rdev);
3237 if (r)
3238 return r;
3239 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003240 evergreen_gpu_init(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003241
Alex Deucherd7ccd8f2010-09-09 11:33:36 -04003242 r = evergreen_blit_init(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003243 if (r) {
Ilija Hadzicfb3d9e92011-10-12 23:29:41 -04003244 r600_blit_fini(rdev);
Alex Deucher27cd7762012-02-23 17:53:42 -05003245 rdev->asic->copy.copy = NULL;
Alex Deucherd7ccd8f2010-09-09 11:33:36 -04003246 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003247 }
3248
Alex Deucher724c80e2010-08-27 18:25:25 -04003249 /* allocate wb buffer */
3250 r = radeon_wb_init(rdev);
3251 if (r)
3252 return r;
3253
Jerome Glisse30eb77f2011-11-20 20:45:34 +00003254 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3255 if (r) {
3256 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3257 return r;
3258 }
3259
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003260 /* Enable IRQ */
3261 r = r600_irq_init(rdev);
3262 if (r) {
3263 DRM_ERROR("radeon: IH init failed (%d).\n", r);
3264 radeon_irq_kms_fini(rdev);
3265 return r;
3266 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003267 evergreen_irq_set(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003268
Christian Könige32eb502011-10-23 12:56:27 +02003269 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
Alex Deucher78c55602011-11-17 14:25:56 -05003270 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
3271 0, 0xfffff, RADEON_CP_PACKET2);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003272 if (r)
3273 return r;
3274 r = evergreen_cp_load_microcode(rdev);
3275 if (r)
3276 return r;
Alex Deucherfe251e22010-03-24 13:36:43 -04003277 r = evergreen_cp_resume(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003278 if (r)
3279 return r;
Alex Deucherfe251e22010-03-24 13:36:43 -04003280
Jerome Glisseb15ba512011-11-15 11:48:34 -05003281 r = radeon_ib_pool_start(rdev);
3282 if (r)
3283 return r;
3284
Alex Deucherf7128122012-02-23 17:53:45 -05003285 r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
Jerome Glisseb15ba512011-11-15 11:48:34 -05003286 if (r) {
3287 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
3288 rdev->accel_working = false;
Matthijs Kooijman3fe89a02012-02-02 21:23:11 +01003289 return r;
Dave Airlie7a7e8732012-01-03 09:43:28 +00003290 }
3291
Rafał Miłecki69d2ae52011-12-07 23:32:24 +01003292 r = r600_audio_init(rdev);
3293 if (r) {
3294 DRM_ERROR("radeon: audio init failed\n");
Jerome Glisseb15ba512011-11-15 11:48:34 -05003295 return r;
3296 }
3297
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003298 return 0;
3299}
3300
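/* evergreen_resume - bring the asic back up after suspend.
 *
 * Resets the asic first (the gfx blocks are often left in a bad
 * state), re-posts the card via atom_asic_init() and then re-runs the
 * full evergreen_startup() sequence.
 */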
3301int evergreen_resume(struct radeon_device *rdev)
3302{
3303 int r;
3304
Alex Deucher86f5c9e2010-12-20 12:35:04 -05003305 /* reset the asic, the gfx blocks are often in a bad state
3306 * after the driver is unloaded or after a resume
3307 */
3308 if (radeon_asic_reset(rdev))
3309 dev_warn(rdev->dev, "GPU reset failed!\n");
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003310 /* Do not reset the GPU before posting; on rv770 hw, unlike on r500 hw,
3311 * posting performs the necessary tasks to bring the GPU back into
3312 * good shape.
3313 */
3314 /* post card */
3315 atom_asic_init(rdev->mode_info.atom_context);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003316
Jerome Glisseb15ba512011-11-15 11:48:34 -05003317 rdev->accel_working = true;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003318 r = evergreen_startup(rdev);
3319 if (r) {
Alex Deucher755d8192011-03-02 20:07:34 -05003320 DRM_ERROR("evergreen startup failed on resume\n");
Jerome Glisse6b7746e2012-02-20 17:57:20 -05003321 rdev->accel_working = false;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003322 return r;
3323 }
Alex Deucherfe251e22010-03-24 13:36:43 -04003324
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003325 return r;
3326
3327}
3328
3329int evergreen_suspend(struct radeon_device *rdev)
3330{
Christian Könige32eb502011-10-23 12:56:27 +02003331 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Christian König7b1f2482011-09-23 15:11:23 +02003332
Rafał Miłecki69d2ae52011-12-07 23:32:24 +01003333 r600_audio_fini(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003334 /* FIXME: we should wait for ring to be empty */
Jerome Glisseb15ba512011-11-15 11:48:34 -05003335 radeon_ib_pool_suspend(rdev);
3336 r600_blit_suspend(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003337 r700_cp_stop(rdev);
Christian Könige32eb502011-10-23 12:56:27 +02003338 ring->ready = false;
Alex Deucher45f9a392010-03-24 13:55:51 -04003339 evergreen_irq_suspend(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04003340 radeon_wb_disable(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003341 evergreen_pcie_gart_disable(rdev);
Alex Deucherd7ccd8f2010-09-09 11:33:36 -04003342
3343 return 0;
3344}
3345
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003346/* Plan is to move initialization into this function and use
3347 * helper functions so that radeon_device_init pretty much
3348 * does nothing more than call the asic specific functions. This
3349 * should also allow us to remove a bunch of callback functions
3350 * like vram_info.
3351 */
3352int evergreen_init(struct radeon_device *rdev)
3353{
3354 int r;
3355
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003356 /* This doesn't do much */
3357 r = radeon_gem_init(rdev);
3358 if (r)
3359 return r;
3360 /* Read BIOS */
3361 if (!radeon_get_bios(rdev)) {
3362 if (ASIC_IS_AVIVO(rdev))
3363 return -EINVAL;
3364 }
3365 /* Must be an ATOMBIOS */
3366 if (!rdev->is_atom_bios) {
Alex Deucher755d8192011-03-02 20:07:34 -05003367 dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003368 return -EINVAL;
3369 }
3370 r = radeon_atombios_init(rdev);
3371 if (r)
3372 return r;
Alex Deucher86f5c9e2010-12-20 12:35:04 -05003373 /* reset the asic, the gfx blocks are often in a bad state
3374 * after the driver is unloaded or after a resume
3375 */
3376 if (radeon_asic_reset(rdev))
3377 dev_warn(rdev->dev, "GPU reset failed!\n");
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003378 /* Post card if necessary */
Alex Deucherfd909c32011-01-11 18:08:59 -05003379 if (!radeon_card_posted(rdev)) {
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003380 if (!rdev->bios) {
3381 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3382 return -EINVAL;
3383 }
3384 DRM_INFO("GPU not posted. posting now...\n");
3385 atom_asic_init(rdev->mode_info.atom_context);
3386 }
3387 /* Initialize scratch registers */
3388 r600_scratch_init(rdev);
3389 /* Initialize surface registers */
3390 radeon_surface_init(rdev);
3391 /* Initialize clocks */
3392 radeon_get_clock_info(rdev->ddev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003393 /* Fence driver */
3394 r = radeon_fence_driver_init(rdev);
3395 if (r)
3396 return r;
Jerome Glissed594e462010-02-17 21:54:29 +00003397 /* initialize AGP */
3398 if (rdev->flags & RADEON_IS_AGP) {
3399 r = radeon_agp_init(rdev);
3400 if (r)
3401 radeon_agp_disable(rdev);
3402 }
3403 /* initialize memory controller */
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003404 r = evergreen_mc_init(rdev);
3405 if (r)
3406 return r;
3407 /* Memory manager */
3408 r = radeon_bo_init(rdev);
3409 if (r)
3410 return r;
Alex Deucher45f9a392010-03-24 13:55:51 -04003411
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003412 r = radeon_irq_kms_init(rdev);
3413 if (r)
3414 return r;
3415
Christian Könige32eb502011-10-23 12:56:27 +02003416 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3417 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003418
3419 rdev->ih.ring_obj = NULL;
3420 r600_ih_ring_init(rdev, 64 * 1024);
3421
3422 r = r600_pcie_gart_init(rdev);
3423 if (r)
3424 return r;
Alex Deucher0fcdb612010-03-24 13:20:41 -04003425
Jerome Glisseb15ba512011-11-15 11:48:34 -05003426 r = radeon_ib_pool_init(rdev);
Alex Deucher148a03b2010-06-03 19:00:03 -04003427 rdev->accel_working = true;
Jerome Glisseb15ba512011-11-15 11:48:34 -05003428 if (r) {
3429 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
3430 rdev->accel_working = false;
3431 }
3432
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003433 r = evergreen_startup(rdev);
3434 if (r) {
Alex Deucherfe251e22010-03-24 13:36:43 -04003435 dev_err(rdev->dev, "disabling GPU acceleration\n");
3436 r700_cp_fini(rdev);
Alex Deucherfe251e22010-03-24 13:36:43 -04003437 r600_irq_fini(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04003438 radeon_wb_fini(rdev);
Jerome Glisseb15ba512011-11-15 11:48:34 -05003439 r100_ib_fini(rdev);
Alex Deucherfe251e22010-03-24 13:36:43 -04003440 radeon_irq_kms_fini(rdev);
Alex Deucher0fcdb612010-03-24 13:20:41 -04003441 evergreen_pcie_gart_fini(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003442 rdev->accel_working = false;
3443 }
Alex Deucher77e00f22011-12-21 11:58:17 -05003444
3445 /* Don't start up if the MC ucode is missing on BTC parts.
3446 * The default clocks and voltages before the MC ucode
3447 * is loaded are not sufficient for advanced operations.
3448 */
3449 if (ASIC_IS_DCE5(rdev)) {
3450 if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
3451 DRM_ERROR("radeon: MC ucode required for NI+.\n");
3452 return -EINVAL;
3453 }
3454 }
3455
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003456 return 0;
3457}
3458
3459void evergreen_fini(struct radeon_device *rdev)
3460{
Rafał Miłecki69d2ae52011-12-07 23:32:24 +01003461 r600_audio_fini(rdev);
Ilija Hadzicfb3d9e92011-10-12 23:29:41 -04003462 r600_blit_fini(rdev);
Alex Deucher45f9a392010-03-24 13:55:51 -04003463 r700_cp_fini(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003464 r600_irq_fini(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04003465 radeon_wb_fini(rdev);
Jerome Glisseb15ba512011-11-15 11:48:34 -05003466 r100_ib_fini(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003467 radeon_irq_kms_fini(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003468 evergreen_pcie_gart_fini(rdev);
Alex Deucher16cdf042011-10-28 10:30:02 -04003469 r600_vram_scratch_fini(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003470 radeon_gem_fini(rdev);
Christian König15d33322011-09-15 19:02:22 +02003471 radeon_semaphore_driver_fini(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003472 radeon_fence_driver_fini(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003473 radeon_agp_fini(rdev);
3474 radeon_bo_fini(rdev);
3475 radeon_atombios_fini(rdev);
3476 kfree(rdev->bios);
3477 rdev->bios = NULL;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003478}
Alex Deucher9e46a482011-01-06 18:49:35 -05003479
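/* evergreen_pcie_gen2_enable - switch the PCIE link to gen2 speed.
 *
 * Skipped when the radeon_pcie_gen2 module parameter is 0 and on IGP,
 * non-PCIE and dual-GPU (X2) boards. If the other end of the link has
 * ever sent or advertised gen2, allow link upconfigure, clear the
 * failed speed-change counter and set the gen2 enable strap;
 * otherwise leave upconfigure disabled.
 */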
Ilija Hadzicb07759b2011-09-20 10:22:58 -04003480void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
Alex Deucher9e46a482011-01-06 18:49:35 -05003481{
3482 u32 link_width_cntl, speed_cntl;
3483
Alex Deucherd42dd572011-01-12 20:05:11 -05003484 if (radeon_pcie_gen2 == 0)
3485 return;
3486
Alex Deucher9e46a482011-01-06 18:49:35 -05003487 if (rdev->flags & RADEON_IS_IGP)
3488 return;
3489
3490 if (!(rdev->flags & RADEON_IS_PCIE))
3491 return;
3492
3493 /* x2 cards have a special sequence */
3494 if (ASIC_IS_X2(rdev))
3495 return;
3496
3497 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3498 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
3499 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3500
3501 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3502 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3503 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3504
3505 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3506 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3507 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3508
3509 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3510 speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
3511 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3512
3513 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3514 speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
3515 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3516
3517 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3518 speed_cntl |= LC_GEN2_EN_STRAP;
3519 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3520
3521 } else {
3522 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3523 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
3524 if (1)
3525 link_width_cntl |= LC_UPCONFIGURE_DIS;
3526 else
3527 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3528 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3529 }
3530}