/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376

static const u32 crtc_offsets[6] =
{
	EVERGREEN_CRTC0_REGISTER_OFFSET,
	EVERGREEN_CRTC1_REGISTER_OFFSET,
	EVERGREEN_CRTC2_REGISTER_OFFSET,
	EVERGREEN_CRTC3_REGISTER_OFFSET,
	EVERGREEN_CRTC4_REGISTER_OFFSET,
	EVERGREEN_CRTC5_REGISTER_OFFSET
};

static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
				     int ring, u32 cp_int_cntl);

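/* decode the tiling flags into the bank width/height, macro tile aspect
 * and tile split values used by the EVERGREEN_ADDR_SURF_* register
 * encodings
 */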
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
			     unsigned *bankh, unsigned *mtaspect,
			     unsigned *tile_split)
{
	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
	switch (*bankw) {
	default:
	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
	}
	switch (*bankh) {
	default:
	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
	}
	switch (*mtaspect) {
	default:
	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
	}
}

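/* sanity check the PCIe MaxReadReq size left by the BIOS/OS and force it
 * back to a safe value (512 bytes) if it is invalid
 */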
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
	u16 ctl, v;
	int cap, err;

	cap = pci_pcie_cap(rdev->pdev);
	if (!cap)
		return;

	err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		return;

	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;

	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
	 * to avoid hangs or performance issues
	 */
	if ((v == 0) || (v == 6) || (v == 7)) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= (2 << 12);
		pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
	}
}

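/* wait for the next vblank period: leave the current vblank (if we are in
 * one) and then wait for vblank to be asserted again; does nothing if the
 * crtc is disabled or out of range
 */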
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	int i;

	if (crtc >= rdev->num_crtc)
		return;

	if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
				break;
			udelay(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
				break;
			udelay(1);
		}
	}
}

void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

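/* program the new scanout address for a page flip and return the current
 * GRPH_SURFACE_UPDATE_PENDING status so the caller can tell whether the
 * new base has been latched yet
 */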
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}

/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp, toffset;
	int actual_temp = 0;

	if (rdev->family == CHIP_JUNIPER) {
		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
			TOFFSET_SHIFT;
		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
			TS0_ADC_DOUT_SHIFT;

		if (toffset & 0x100)
			actual_temp = temp / 2 - (0x200 - toffset);
		else
			actual_temp = temp / 2 + toffset;

		actual_temp = actual_temp * 1000;

	} else {
		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
			ASIC_T_SHIFT;

		if (temp & 0x400)
			actual_temp = -256;
		else if (temp & 0x200)
			actual_temp = 255;
		else if (temp & 0x100) {
			actual_temp = temp & 0x1ff;
			actual_temp |= ~0x1ff;
		} else
			actual_temp = temp & 0xff;

		actual_temp = (actual_temp * 1000) / 2;
	}

	return actual_temp;
}

int sumo_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
	int actual_temp = temp - 49;

	return actual_temp * 1000;
}

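/* set up the power profiles: the low/mid profiles use the battery state
 * on mobility parts and the performance state otherwise; the high
 * profiles always use the performance state
 */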
void sumo_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	/* low,mid sh/mh */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);

	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

	/* high sh/mh */
	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;

	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;
}

void evergreen_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if (voltage->type == VOLTAGE_SW) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
		}
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->vddci == 0xff01)
			return;
		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
			rdev->pm.current_vddci = voltage->vddci;
			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
		}
	}
}

void evergreen_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

void evergreen_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

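/* check the HPD pin to see if a display is physically connected */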
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = evergreen_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

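/* enable the HPD pins used by the connectors on this board and set up
 * the HPD interrupts
 */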
void evergreen_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			rdev->irq.hpd[1] = true;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			rdev->irq.hpd[2] = true;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			rdev->irq.hpd[3] = true;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			rdev->irq.hpd[4] = true;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			rdev->irq.hpd[5] = true;
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	if (rdev->irq.installed)
		evergreen_irq_set(rdev);
}

void evergreen_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			rdev->irq.hpd[1] = false;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			rdev->irq.hpd[2] = false;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			rdev->irq.hpd[3] = false;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			rdev->irq.hpd[4] = false;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			rdev->irq.hpd[5] = false;
			break;
		default:
			break;
		}
	}
}

/* watermark setup */

static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
					struct radeon_crtc *radeon_crtc,
					struct drm_display_mode *mode,
					struct drm_display_mode *other_mode)
{
	u32 tmp;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers. The partitioning is done via one of four
	 * preset allocations specified in bits 2:0:
	 * first display controller
	 *  0 - first half of lb (3840 * 2)
	 *  1 - first 3/4 of lb (5760 * 2)
	 *  2 - whole lb (7680 * 2), other crtc must be disabled
	 *  3 - first 1/4 of lb (1920 * 2)
	 * second display controller
	 *  4 - second half of lb (3840 * 2)
	 *  5 - second 3/4 of lb (5760 * 2)
	 *  6 - whole lb (7680 * 2), other crtc must be disabled
	 *  7 - last 1/4 of lb (1920 * 2)
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs. Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (radeon_crtc->base.enabled && mode) {
		if (other_mode)
			tmp = 0; /* 1/2 */
		else
			tmp = 2; /* whole */
	} else
		tmp = 0;

	/* second controller of the pair uses second half of the lb */
	if (radeon_crtc->crtc_id % 2)
		tmp += 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);

	if (radeon_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		case 4:
		default:
			if (ASIC_IS_DCE5(rdev))
				return 4096 * 2;
			else
				return 3840 * 2;
		case 1:
		case 5:
			if (ASIC_IS_DCE5(rdev))
				return 6144 * 2;
			else
				return 5760 * 2;
		case 2:
		case 6:
			if (ASIC_IS_DCE5(rdev))
				return 8192 * 2;
			else
				return 7680 * 2;
		case 3:
		case 7:
			if (ASIC_IS_DCE5(rdev))
				return 2048 * 2;
			else
				return 1920 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	}
}

struct evergreen_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, disp_clk);
	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = evergreen_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}

static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (evergreen_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

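/* compute latency watermarks A and B from the current mode and clocks and
 * program them, along with the display priority marks, for the given crtc
 */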
static void evergreen_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct evergreen_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 pipe_offset = radeon_crtc->crtc_id * 16;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !evergreen_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);

}

void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i += 2) {
		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

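/* poll SRBM_STATUS until the MC busy bits clear or we time out */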
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(SRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

/*
 * GART
 */
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

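/* set up the L2 cache, the L1 TLBs and VM context 0 to cover the GTT
 * aperture with the page table in VRAM, then flush the TLB and mark the
 * GART ready
 */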
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	if (rdev->flags & RADEON_IS_IGP) {
		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
	} else {
		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
		if ((rdev->family == CHIP_JUNIPER) ||
		    (rdev->family == CHIP_CYPRESS) ||
		    (rdev->family == CHIP_HEMLOCK) ||
		    (rdev->family == CHIP_BARTS))
			WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
	}
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL, 0);

	evergreen_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
	evergreen_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}


void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
}

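/* save the current display state, blank the active crtcs, wait a frame,
 * and put the MC into blackout mode so VRAM can be safely reprogrammed
 */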
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	u32 crtc_enabled, tmp, frame_count, blackout;
	int i, j;

	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);

	/* disable VGA render */
	WREG32(VGA_RENDER_CONTROL, 0);
	/* blank the display controllers */
	for (i = 0; i < rdev->num_crtc; i++) {
		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
		if (crtc_enabled) {
			save->crtc_enabled[i] = true;
			if (ASIC_IS_DCE6(rdev)) {
				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
				if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
					radeon_wait_for_vblank(rdev, i);
					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				}
			} else {
				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
				if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
					radeon_wait_for_vblank(rdev, i);
					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
					tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
					WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
				}
			}
			/* wait for the next frame */
			frame_count = radeon_get_vblank_counter(rdev, i);
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (radeon_get_vblank_counter(rdev, i) != frame_count)
					break;
				udelay(1);
			}

			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
			tmp &= ~EVERGREEN_CRTC_MASTER_EN;
			WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
			/* ***** */
		} else {
			save->crtc_enabled[i] = false;
		}
	}

	radeon_mc_wait_for_idle(rdev);

	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
	if ((blackout & BLACKOUT_MODE_MASK) != 1) {
		/* Block CPU access */
		WREG32(BIF_FB_EN, 0);
		/* blackout the MC */
		blackout &= ~BLACKOUT_MODE_MASK;
		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);

	/* lock double buffered regs */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
			if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
				tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (!(tmp & 1)) {
				tmp |= 1;
				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
		}
	}
}

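/* undo evergreen_mc_stop(): restore the scanout addresses, release the
 * double buffered register locks, take the MC out of blackout and
 * re-enable the displays
 */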
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	u32 tmp, frame_count;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < rdev->num_crtc; i++) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)rdev->mc.vram_start);
	}
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);

	/* unlock regs and wait for update */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
			if ((tmp & 0x3) != 0) {
				tmp &= ~0x3;
				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
			if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
				tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (tmp & 1) {
				tmp &= ~1;
				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			for (j = 0; j < rdev->usec_timeout; j++) {
				tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
				if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
					break;
				udelay(1);
			}
		}
	}

	/* unblackout the MC */
	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
	tmp &= ~BLACKOUT_MODE_MASK;
	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			if (ASIC_IS_DCE6(rdev)) {
				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			} else {
				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
				tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = radeon_get_vblank_counter(rdev, i);
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (radeon_get_vblank_counter(rdev, i) != frame_count)
					break;
				udelay(1);
			}
		}
	}
	/* Unlock vga access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}

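/* program the VRAM and AGP/system apertures in the MC: the displays are
 * stopped around the update and resumed at the new addresses
 */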
void evergreen_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	/* llano/ontario only */
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2)) {
		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
	}
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/*
 * CP.
 */
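/* emit an indirect buffer on the gfx ring for execution by the CP */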
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);
	/* FIXME: implement */
	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw);
}


static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r700_cp_stop(rdev);
	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

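/* initialize the CP microengine and emit the default (clear) context
 * state so the ring starts from a known configuration
 */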
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001428static int evergreen_cp_start(struct radeon_device *rdev)
1429{
Christian Könige32eb502011-10-23 12:56:27 +02001430 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucher2281a372010-10-21 13:31:38 -04001431 int r, i;
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001432 uint32_t cp_me;
1433
Christian Könige32eb502011-10-23 12:56:27 +02001434 r = radeon_ring_lock(rdev, ring, 7);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001435 if (r) {
1436 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1437 return r;
1438 }
Christian Könige32eb502011-10-23 12:56:27 +02001439 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
1440 radeon_ring_write(ring, 0x1);
1441 radeon_ring_write(ring, 0x0);
1442 radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
1443 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1444 radeon_ring_write(ring, 0);
1445 radeon_ring_write(ring, 0);
1446 radeon_ring_unlock_commit(rdev, ring);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001447
1448 cp_me = 0xff;
1449 WREG32(CP_ME_CNTL, cp_me);
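	/* writing 0xff touches only the low byte, leaving the
	 * CP_ME_HALT/CP_PFP_HALT bits (the same halt bits the soft-reset
	 * path sets) clear, so both microengines start executing */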

	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < evergreen_default_size; i++)
		radeon_ring_write(ring, evergreen_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev, ring);

	return 0;
}

int evergreen_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	/* Set ring buffer size */
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
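	/* RB_BUFSZ encodes the ring size as log2 of qwords; the
	 * page-sized value shifted into bits 15:8 is RB_BLKSZ, which
	 * likely sets the granularity at which the CP fetches and
	 * reports its read pointer */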
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}
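	/* with writeback disabled, RB_NO_UPDATE keeps the CP from
	 * shadowing its read pointer to memory and SCRATCH_UMSK masks
	 * off all scratch register writebacks */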

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	ring->rptr = RREG32(CP_RB_RPTR);

	evergreen_cp_start(rdev);
	ring->ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}
	return 0;
}

/*
 * Core functions
 */
static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
						  u32 num_tile_pipes,
						  u32 num_backends,
						  u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 cur_pipe;
	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
		num_tile_pipes = EVERGREEN_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > EVERGREEN_MAX_BACKENDS)
		num_backends = EVERGREEN_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		force_no_swizzle = false;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_BARTS:
	default:
		force_no_swizzle = true;
		break;
	}
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
	}
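	/* each nibble of backend_map selects the render backend that
	 * services the corresponding tile pipe: pipe 0 in bits 3:0,
	 * pipe 1 in bits 7:4, and so on */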

	return backend_map;
}

static void evergreen_gpu_init(struct radeon_device *rdev)
{
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config;
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 gb_backend_map;
	u32 grbm_gfx_index;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 sq_config;
	u32 sq_lds_resource_mgmt;
	u32 sq_gpr_resource_mgmt_1;
	u32 sq_gpr_resource_mgmt_2;
	u32 sq_gpr_resource_mgmt_3;
	u32 sq_thread_resource_mgmt;
	u32 sq_thread_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_1;
	u32 sq_stack_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_3;
	u32 vgt_cache_invalidation;
	u32 hdp_host_path_cntl, tmp;
	int i, j, num_shader_engines, ps_thread_count;

	switch (rdev->family) {
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_JUNIPER:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_REDWOOD:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_CEDAR:
	default:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_PALM:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_SUMO:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 2;
		if (rdev->pdev->device == 0x9648)
			rdev->config.evergreen.max_simds = 3;
		else if ((rdev->pdev->device == 0x9647) ||
			 (rdev->pdev->device == 0x964a))
			rdev->config.evergreen.max_simds = 4;
		else
			rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_SUMO2:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_BARTS:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 7;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_TURKS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 6;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_CAICOS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;

	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
				  & EVERGREEN_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
			       & EVERGREEN_MAX_SIMDS_MASK);

	cc_rb_backend_disable =
		BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
				& EVERGREEN_MAX_BACKENDS_MASK);


	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2))
		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
	else
		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		gb_addr_config |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_config |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_config |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_config |= NUM_PIPES(3);
		break;
	}

	gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
	gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
	gb_addr_config |= MULTI_GPU_TILE_SIZE(2);

	if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
		gb_addr_config |= ROW_SIZE(2);
	else
		gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
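	/* gb_addr_config presumably mirrors how the memory controller
	 * spreads surfaces across pipes and banks; the same value is
	 * fanned out to the display (DMIF) and HDP blocks further down */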

	if (rdev->ddev->pdev->device == 0x689e) {
		u32 efuse_straps_4;
		u32 efuse_straps_3;
		u8 efuse_box_bit_131_124;

		WREG32(RCU_IND_INDEX, 0x204);
		efuse_straps_4 = RREG32(RCU_IND_DATA);
		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));

		switch (efuse_box_bit_131_124) {
		case 0x00:
			gb_backend_map = 0x76543210;
			break;
		case 0x55:
			gb_backend_map = 0x77553311;
			break;
		case 0x56:
			gb_backend_map = 0x77553300;
			break;
		case 0x59:
			gb_backend_map = 0x77552211;
			break;
		case 0x66:
			gb_backend_map = 0x77443300;
			break;
		case 0x99:
			gb_backend_map = 0x66552211;
			break;
		case 0x5a:
			gb_backend_map = 0x77552200;
			break;
		case 0xaa:
			gb_backend_map = 0x66442200;
			break;
		case 0x95:
			gb_backend_map = 0x66553311;
			break;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else if (rdev->ddev->pdev->device == 0x68b9) {
		u32 efuse_straps_3;
		u8 efuse_box_bit_127_124;

		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);

		switch (efuse_box_bit_127_124) {
		case 0x0:
			gb_backend_map = 0x00003210;
			break;
		case 0x5:
		case 0x6:
		case 0x9:
		case 0xa:
			gb_backend_map = 0x00003311;
			break;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else {
		switch (rdev->family) {
		case CHIP_CYPRESS:
		case CHIP_HEMLOCK:
		case CHIP_BARTS:
			gb_backend_map = 0x66442200;
			break;
		case CHIP_JUNIPER:
			gb_backend_map = 0x00002200;
			break;
		default:
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
		}
	}

	/* setup tiling info dword. gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.evergreen.tile_config = 0;
	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		rdev->config.evergreen.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.evergreen.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.evergreen.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.evergreen.tile_config |= (3 << 0);
		break;
	}
	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.evergreen.tile_config |= 1 << 4;
	else {
		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
		case 0: /* four banks */
			rdev->config.evergreen.tile_config |= 0 << 4;
			break;
		case 1: /* eight banks */
			rdev->config.evergreen.tile_config |= 1 << 4;
			break;
		case 2: /* sixteen banks */
		default:
			rdev->config.evergreen.tile_config |= 2 << 4;
			break;
		}
	}
	rdev->config.evergreen.tile_config |=
		((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
	rdev->config.evergreen.tile_config |=
		((gb_addr_config & 0x30000000) >> 28) << 12;
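	/* userspace queries this dword back through the RADEON_INFO
	 * ioctl (RADEON_INFO_TILING_CONFIG) to reconstruct the board's
	 * tiling layout */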

	rdev->config.evergreen.backend_map = gb_backend_map;
	WREG32(GB_BACKEND_MAP, gb_backend_map);
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;

	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
		u32 rb = cc_rb_backend_disable | (0xf0 << 16);
		u32 sp = cc_gc_shader_pipe_config;
		u32 gfx = grbm_gfx_index | SE_INDEX(i);

		if (i == num_shader_engines) {
			rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
			sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
		}

		WREG32(GRBM_GFX_INDEX, gfx);
		WREG32(RLC_GFX_INDEX, gfx);

		WREG32(CC_RB_BACKEND_DISABLE, rb);
		WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
		WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
		WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
	}
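	/* the loop above addressed one shader engine at a time; drop
	 * back to broadcast mode so later writes reach every SE */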

	grbm_gfx_index = INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES;
	WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
	WREG32(RLC_GFX_INDEX, grbm_gfx_index);

	WREG32(CGTS_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_TCC_DISABLE, 0);

	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));

	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
			     SYNC_GRADIENT |
			     SYNC_WALKER |
			     SYNC_ALIGNER));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);


	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	if (rdev->family <= CHIP_SUMO2)
		WREG32(SMX_SAR_CTL0, 0x00010000);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);
	WREG32(SPI_CONFIG_CNTL, 0);
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (VC_ENABLE |
		      EXPORT_SRC_C |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;
		break;
	default:
		break;
	}

	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);

	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
		ps_thread_count = 96;
		break;
	default:
		ps_thread_count = 128;
		break;
	}

	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);

	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
		break;
	default:
		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
		break;
	}
	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
	WREG32(VGT_OUT_DEALLOC_CNTL, 16);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	/* clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR8_BASE, 0);
	WREG32(CB_COLOR9_BASE, 0);
	WREG32(CB_COLOR10_BASE, 0);
	WREG32(CB_COLOR11_BASE, 0);

	/* set the shader const cache sizes to 0 */
	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
		WREG32(i, 0);
	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
		WREG32(i, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);

}

int evergreen_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2))
		tmp = RREG32(FUS_MC_ARB_RAMCFG);
	else
		tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
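	/* e.g. four 32-bit channels give an effective 128-bit VRAM bus */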
	/* Could the aperture size report 0? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2)) {
		/* size in bytes on fusion */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	} else {
		/* size in MB on evergreen/cayman/tn */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
	}
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r700_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}

bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status_se0, grbm_status_se1;
	struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
	int r;

	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		r100_gpu_lockup_update(lockup, ring);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, ring, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(ring, 0x80000000);
		radeon_ring_write(ring, 0x80000000);
		radeon_ring_unlock_commit(rdev, ring);
	}
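	/* the two NOPs give the CP something to consume; if the read
	 * pointer still does not advance, r100_gpu_cp_is_lockup() below
	 * will declare the GPU hung */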
	ring->rptr = RREG32(CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}

static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VC |
		      SOFT_RESET_VGT);

	dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
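	/* the (void) read-backs presumably flush the posted register
	 * writes so each reset pulse has latched before the delay */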
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}

int evergreen_asic_reset(struct radeon_device *rdev)
{
	return evergreen_gpu_soft_reset(rdev);
}

/* Interrupts */

u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	switch (crtc) {
	case 0:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
	case 1:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
	case 2:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
	case 3:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
	case 4:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
	case 5:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
	default:
		return 0;
	}
}

void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	if (rdev->family >= CHIP_CAYMAN) {
		cayman_cp_int_cntl_setup(rdev, 0,
					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
		cayman_cp_int_cntl_setup(rdev, 1, 0);
		cayman_cp_int_cntl_setup(rdev, 2, 0);
	} else
		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	/* only one DAC on DCE6 */
	if (!ASIC_IS_DCE6(rdev))
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2597
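	/* preserve only the polarity bit so the hotplug sense keeps its
	 * configuration while the HPD interrupt enables are cleared */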
2598 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2599 WREG32(DC_HPD1_INT_CONTROL, tmp);
2600 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2601 WREG32(DC_HPD2_INT_CONTROL, tmp);
2602 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2603 WREG32(DC_HPD3_INT_CONTROL, tmp);
2604 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2605 WREG32(DC_HPD4_INT_CONTROL, tmp);
2606 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2607 WREG32(DC_HPD5_INT_CONTROL, tmp);
2608 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2609 WREG32(DC_HPD6_INT_CONTROL, tmp);
2610
2611}
2612
2613int evergreen_irq_set(struct radeon_device *rdev)
2614{
2615 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
Alex Deucher1b370782011-11-17 20:13:28 -05002616 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
Alex Deucher45f9a392010-03-24 13:55:51 -04002617 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
2618 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
Alex Deucher2031f772010-04-22 12:52:11 -04002619 u32 grbm_int_cntl = 0;
Alex Deucher6f34be52010-11-21 10:59:01 -05002620 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
Alex Deucher45f9a392010-03-24 13:55:51 -04002621
2622 if (!rdev->irq.installed) {
Joe Perchesfce7d612010-10-30 21:08:30 +00002623 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
Alex Deucher45f9a392010-03-24 13:55:51 -04002624 return -EINVAL;
2625 }
2626 /* don't enable anything if the ih is disabled */
2627 if (!rdev->ih.enabled) {
2628 r600_disable_interrupts(rdev);
2629 /* force the active interrupt state to all disabled */
2630 evergreen_disable_interrupt_state(rdev);
2631 return 0;
2632 }
2633
2634 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2635 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2636 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2637 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
2638 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
2639 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2640
Alex Deucher1b370782011-11-17 20:13:28 -05002641 if (rdev->family >= CHIP_CAYMAN) {
2642 /* enable CP interrupts on all rings */
2643 if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
2644 DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
2645 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
2646 }
2647 if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
2648 DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
2649 cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
2650 }
2651 if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
2652 DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
2653 cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
2654 }
2655 } else {
2656 if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
2657 DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
2658 cp_int_cntl |= RB_INT_ENABLE;
2659 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
2660 }
Alex Deucher45f9a392010-03-24 13:55:51 -04002661 }
Alex Deucher1b370782011-11-17 20:13:28 -05002662
Alex Deucher6f34be52010-11-21 10:59:01 -05002663 if (rdev->irq.crtc_vblank_int[0] ||
2664 rdev->irq.pflip[0]) {
Alex Deucher45f9a392010-03-24 13:55:51 -04002665 DRM_DEBUG("evergreen_irq_set: vblank 0\n");
2666 crtc1 |= VBLANK_INT_MASK;
2667 }
Alex Deucher6f34be52010-11-21 10:59:01 -05002668 if (rdev->irq.crtc_vblank_int[1] ||
2669 rdev->irq.pflip[1]) {
Alex Deucher45f9a392010-03-24 13:55:51 -04002670 DRM_DEBUG("evergreen_irq_set: vblank 1\n");
2671 crtc2 |= VBLANK_INT_MASK;
2672 }
Alex Deucher6f34be52010-11-21 10:59:01 -05002673 if (rdev->irq.crtc_vblank_int[2] ||
2674 rdev->irq.pflip[2]) {
Alex Deucher45f9a392010-03-24 13:55:51 -04002675 DRM_DEBUG("evergreen_irq_set: vblank 2\n");
2676 crtc3 |= VBLANK_INT_MASK;
2677 }
Alex Deucher6f34be52010-11-21 10:59:01 -05002678 if (rdev->irq.crtc_vblank_int[3] ||
2679 rdev->irq.pflip[3]) {
Alex Deucher45f9a392010-03-24 13:55:51 -04002680 DRM_DEBUG("evergreen_irq_set: vblank 3\n");
2681 crtc4 |= VBLANK_INT_MASK;
2682 }
Alex Deucher6f34be52010-11-21 10:59:01 -05002683 if (rdev->irq.crtc_vblank_int[4] ||
2684 rdev->irq.pflip[4]) {
Alex Deucher45f9a392010-03-24 13:55:51 -04002685 DRM_DEBUG("evergreen_irq_set: vblank 4\n");
2686 crtc5 |= VBLANK_INT_MASK;
2687 }
Alex Deucher6f34be52010-11-21 10:59:01 -05002688 if (rdev->irq.crtc_vblank_int[5] ||
2689 rdev->irq.pflip[5]) {
Alex Deucher45f9a392010-03-24 13:55:51 -04002690 DRM_DEBUG("evergreen_irq_set: vblank 5\n");
2691 crtc6 |= VBLANK_INT_MASK;
2692 }
2693 if (rdev->irq.hpd[0]) {
2694 DRM_DEBUG("evergreen_irq_set: hpd 1\n");
2695 hpd1 |= DC_HPDx_INT_EN;
2696 }
2697 if (rdev->irq.hpd[1]) {
2698 DRM_DEBUG("evergreen_irq_set: hpd 2\n");
2699 hpd2 |= DC_HPDx_INT_EN;
2700 }
2701 if (rdev->irq.hpd[2]) {
2702 DRM_DEBUG("evergreen_irq_set: hpd 3\n");
2703 hpd3 |= DC_HPDx_INT_EN;
2704 }
2705 if (rdev->irq.hpd[3]) {
2706 DRM_DEBUG("evergreen_irq_set: hpd 4\n");
2707 hpd4 |= DC_HPDx_INT_EN;
2708 }
2709 if (rdev->irq.hpd[4]) {
2710 DRM_DEBUG("evergreen_irq_set: hpd 5\n");
2711 hpd5 |= DC_HPDx_INT_EN;
2712 }
2713 if (rdev->irq.hpd[5]) {
2714 DRM_DEBUG("evergreen_irq_set: hpd 6\n");
2715 hpd6 |= DC_HPDx_INT_EN;
2716 }
Alex Deucher2031f772010-04-22 12:52:11 -04002717 if (rdev->irq.gui_idle) {
2718 DRM_DEBUG("gui idle\n");
2719 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
2720 }
Alex Deucher45f9a392010-03-24 13:55:51 -04002721
Alex Deucher1b370782011-11-17 20:13:28 -05002722 if (rdev->family >= CHIP_CAYMAN) {
2723 cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
2724 cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
2725 cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
2726 } else
2727 WREG32(CP_INT_CNTL, cp_int_cntl);
Alex Deucher2031f772010-04-22 12:52:11 -04002728 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
Alex Deucher45f9a392010-03-24 13:55:51 -04002729
2730 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
2731 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
Alex Deucherb7eff392011-07-08 11:44:56 -04002732 if (rdev->num_crtc >= 4) {
Alex Deucher18007402010-11-22 17:56:28 -05002733 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
2734 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
Alex Deucherb7eff392011-07-08 11:44:56 -04002735 }
2736 if (rdev->num_crtc >= 6) {
Alex Deucher18007402010-11-22 17:56:28 -05002737 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
2738 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
2739 }
Alex Deucher45f9a392010-03-24 13:55:51 -04002740
Alex Deucher6f34be52010-11-21 10:59:01 -05002741 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
2742 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
Alex Deucherb7eff392011-07-08 11:44:56 -04002743 if (rdev->num_crtc >= 4) {
2744 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
2745 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
2746 }
2747 if (rdev->num_crtc >= 6) {
2748 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
2749 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
2750 }
Alex Deucher6f34be52010-11-21 10:59:01 -05002751
Alex Deucher45f9a392010-03-24 13:55:51 -04002752 WREG32(DC_HPD1_INT_CONTROL, hpd1);
2753 WREG32(DC_HPD2_INT_CONTROL, hpd2);
2754 WREG32(DC_HPD3_INT_CONTROL, hpd3);
2755 WREG32(DC_HPD4_INT_CONTROL, hpd4);
2756 WREG32(DC_HPD5_INT_CONTROL, hpd5);
2757 WREG32(DC_HPD6_INT_CONTROL, hpd6);
2758
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002759 return 0;
2760}
2761
static void evergreen_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
	if (rdev->num_crtc >= 4) {
		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
	}
	if (rdev->num_crtc >= 6) {
		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
	}

	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);

	if (rdev->num_crtc >= 4) {
		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
	}

	if (rdev->num_crtc >= 6) {
		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
	}

	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}
}

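/* Mask all interrupt sources, give any in-flight interrupt ~1ms to land,
 * ack whatever fired in the meantime, and leave the display interrupt
 * state programmed to "disabled".
 */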
void evergreen_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	evergreen_irq_ack(rdev);
	evergreen_disable_interrupt_state(rdev);
}

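/* Suspend-time variant: quiesce interrupts, then stop the RLC so no new
 * interrupt traffic is generated while the device is down.
 */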
void evergreen_irq_suspend(struct radeon_device *rdev)
{
	evergreen_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

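/* Fetch the current IH ring write pointer: from the GPU-updated writeback
 * page when writeback is enabled (cheap), otherwise via an MMIO read of
 * IH_RB_WPTR. A set RB_OVERFLOW bit means the GPU wrapped past our read
 * pointer and some vectors were lost; we resync rptr and clear the flag.
 */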
static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}

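/* Drain the IH ring. Each vector occupies 16 bytes; judging from the
 * decode below, dword 0 carries the source id (low byte) and dword 1 the
 * source-specific data (low 28 bits), with the remaining dwords unused
 * here. We loop until rptr catches wptr, and if new vectors arrived while
 * we were working we jump back to restart_ih instead of returning.
 */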
int evergreen_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	unsigned long flags;
	bool queue_hotplug = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	wptr = evergreen_get_ih_wptr(rdev);
	rptr = rdev->ih.rptr;
	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	spin_lock_irqsave(&rdev->ih.lock, flags);
	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}
restart_ih:
	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	evergreen_irq_ack(rdev);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[0])
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 2: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[1])
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 3: /* D3 vblank/vline */
			switch (src_data) {
			case 0: /* D3 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[2]) {
						drm_handle_vblank(rdev->ddev, 2);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[2])
						radeon_crtc_handle_flip(rdev, 2);
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D3 vblank\n");
				}
				break;
			case 1: /* D3 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D3 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 4: /* D4 vblank/vline */
			switch (src_data) {
			case 0: /* D4 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[3]) {
						drm_handle_vblank(rdev->ddev, 3);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[3])
						radeon_crtc_handle_flip(rdev, 3);
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D4 vblank\n");
				}
				break;
			case 1: /* D4 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D4 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D5 vblank/vline */
			switch (src_data) {
			case 0: /* D5 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[4]) {
						drm_handle_vblank(rdev->ddev, 4);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[4])
						radeon_crtc_handle_flip(rdev, 4);
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D5 vblank\n");
				}
				break;
			case 1: /* D5 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D5 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 6: /* D6 vblank/vline */
			switch (src_data) {
			case 0: /* D6 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[5]) {
						drm_handle_vblank(rdev->ddev, 5);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[5])
						radeon_crtc_handle_flip(rdev, 5);
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D6 vblank\n");
				}
				break;
			case 1: /* D6 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D6 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 42: /* HPD hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 2:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 3:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			if (rdev->family >= CHIP_CAYMAN) {
				switch (src_data) {
				case 0:
					radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
					break;
				case 1:
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
					break;
				case 2:
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
					break;
				}
			} else
				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = evergreen_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}

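/* Common hardware bring-up used by both init and resume. The ordering is
 * deliberate: microcode and the memory controller come first, then
 * GART/blitter/writeback, then the IH, and only then the CP ring and IB
 * pool, so fence and CP interrupts are live before any ring runs.
 */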
static int evergreen_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);

	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = r600_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		evergreen_agp_enable(rdev);
	} else {
		r = evergreen_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	evergreen_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;
	r = evergreen_cp_load_microcode(rdev);
	if (r)
		return r;
	r = evergreen_cp_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_start(rdev);
	if (r)
		return r;

	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		rdev->accel_working = false;
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}

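/* Resume: the GPU comes back from S3/S4 in an unknown state, so reset it,
 * re-post it via the AtomBIOS init table, and rerun the full startup path.
 */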
int evergreen_resume(struct radeon_device *rdev)
{
	int r;

	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	/* Do not reset the GPU before posting; on rv770 hw, unlike on r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back
	 * into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		DRM_ERROR("evergreen startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}

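/* Suspend: tear down in roughly the reverse order of startup - audio, IB
 * pool and blitter, then the CP, interrupts, writeback, and the GART.
 */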
int evergreen_suspend(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	r600_audio_fini(rdev);
	/* FIXME: we should wait for ring to be empty */
	radeon_ib_pool_suspend(rdev);
	r600_blit_suspend(rdev);
	r700_cp_stop(rdev);
	ring->ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	evergreen_pcie_gart_disable(rdev);

	return 0;
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does little more
 * than call ASIC-specific functions. This should also allow us
 * to remove a bunch of callbacks like vram_info.
 */
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	rdev->accel_working = true;
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		rdev->accel_working = false;
	}

	r = evergreen_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		radeon_irq_kms_fini(rdev);
		evergreen_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing on BTC parts.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
			DRM_ERROR("radeon: MC ucode required for NI+.\n");
			return -EINVAL;
		}
	}

	return 0;
}

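/* Driver unload: mirrors evergreen_init - accel blocks first, then memory
 * management, BIOS-derived state, and finally the cached BIOS image.
 */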
void evergreen_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r700_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_irq_kms_fini(rdev);
	evergreen_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_semaphore_driver_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

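/* Try to bring the PCIE link up to gen2 speed. Bails out where this does
 * not apply (IGPs, non-PCIE buses, dual-GPU X2 boards) or when the
 * radeon_pcie_gen2 module parameter disables it. The register sequence:
 * allow link upconfigure, clear failed-speed-change state, then set the
 * gen2 enable strap.
 */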
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, speed_cntl;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}