| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 1 | /* | 
 | 2 |  * Copyright 2008 Advanced Micro Devices, Inc. | 
 | 3 |  * Copyright 2008 Red Hat Inc. | 
 | 4 |  * Copyright 2009 Jerome Glisse. | 
 | 5 |  * | 
 | 6 |  * Permission is hereby granted, free of charge, to any person obtaining a | 
 | 7 |  * copy of this software and associated documentation files (the "Software"), | 
 | 8 |  * to deal in the Software without restriction, including without limitation | 
 | 9 |  * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 
 | 10 |  * and/or sell copies of the Software, and to permit persons to whom the | 
 | 11 |  * Software is furnished to do so, subject to the following conditions: | 
 | 12 |  * | 
 | 13 |  * The above copyright notice and this permission notice shall be included in | 
 | 14 |  * all copies or substantial portions of the Software. | 
 | 15 |  * | 
 | 16 |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 
 | 17 |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 
 | 18 |  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | 
 | 19 |  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | 
 | 20 |  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | 
 | 21 |  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | 
 | 22 |  * OTHER DEALINGS IN THE SOFTWARE. | 
 | 23 |  * | 
 | 24 |  * Authors: Dave Airlie | 
 | 25 |  *          Alex Deucher | 
 | 26 |  *          Jerome Glisse | 
 | 27 |  */ | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 28 | /* RS600 / Radeon X1250/X1270 integrated GPU | 
 | 29 |  * | 
 | 30 |  * This file gather function specific to RS600 which is the IGP of | 
 | 31 |  * the X1250/X1270 family supporting intel CPU (while RS690/RS740 | 
 | 32 |  * is the X1250/X1270 supporting AMD CPU). The display engine are | 
 | 33 |  * the avivo one, bios is an atombios, 3D block are the one of the | 
 | 34 |  * R4XX family. The GART is different from the RS400 one and is very | 
 | 35 |  * close to the one of the R600 family (R600 likely being an evolution | 
 | 36 |  * of the RS600 GART block). | 
 | 37 |  */ | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 38 | #include "drmP.h" | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 39 | #include "radeon.h" | 
| Daniel Vetter | e699037 | 2010-03-11 21:19:17 +0000 | [diff] [blame] | 40 | #include "radeon_asic.h" | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 41 | #include "atom.h" | 
 | 42 | #include "rs600d.h" | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 43 |  | 
| Dave Airlie | 3f7dc91a | 2009-08-27 11:10:15 +1000 | [diff] [blame] | 44 | #include "rs600_reg_safe.h" | 
 | 45 |  | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 46 | void rs600_gpu_init(struct radeon_device *rdev); | 
 | 47 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 48 |  | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 49 | void rs600_pre_page_flip(struct radeon_device *rdev, int crtc) | 
 | 50 | { | 
 | 51 | 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; | 
 | 52 | 	u32 tmp; | 
 | 53 |  | 
 | 54 | 	/* make sure flip is at vb rather than hb */ | 
 | 55 | 	tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset); | 
 | 56 | 	tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN; | 
 | 57 | 	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); | 
 | 58 |  | 
 | 59 | 	/* set pageflip to happen anywhere in vblank interval */ | 
 | 60 | 	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); | 
 | 61 |  | 
 | 62 | 	/* enable the pflip int */ | 
 | 63 | 	radeon_irq_kms_pflip_irq_get(rdev, crtc); | 
 | 64 | } | 
 | 65 |  | 
 | 66 | void rs600_post_page_flip(struct radeon_device *rdev, int crtc) | 
 | 67 | { | 
 | 68 | 	/* disable the pflip int */ | 
 | 69 | 	radeon_irq_kms_pflip_irq_put(rdev, crtc); | 
 | 70 | } | 
 | 71 |  | 
 | 72 | u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | 
 | 73 | { | 
 | 74 | 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; | 
 | 75 | 	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); | 
 | 76 |  | 
 | 77 | 	/* Lock the graphics update lock */ | 
 | 78 | 	tmp |= AVIVO_D1GRPH_UPDATE_LOCK; | 
 | 79 | 	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); | 
 | 80 |  | 
 | 81 | 	/* update the scanout addresses */ | 
 | 82 | 	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | 
 | 83 | 	       (u32)crtc_base); | 
 | 84 | 	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | 
 | 85 | 	       (u32)crtc_base); | 
 | 86 |  | 
 | 87 | 	/* Wait for update_pending to go high. */ | 
 | 88 | 	while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); | 
 | 89 | 	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); | 
 | 90 |  | 
 | 91 | 	/* Unlock the lock, so double-buffering can take place inside vblank */ | 
 | 92 | 	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; | 
 | 93 | 	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); | 
 | 94 |  | 
 | 95 | 	/* Return current update_pending status: */ | 
 | 96 | 	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; | 
 | 97 | } | 
 | 98 |  | 
/* Apply the "misc" settings of the requested power state: core voltage,
 * reduced-speed sclk pulse lengths, dynamic voltage drop, HDP clock
 * gating, backbias gating, and (for discrete PCIE parts) the PCIE lane
 * count.  Only reads state from rdev->pm; all effects are register
 * writes. */
void rs600_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;

	/* GPIO-controlled voltage: drive the pin toward or away from the
	 * "drop" level depending on whether this state supports it; the
	 * meaning of high/low is given by voltage->active_high. */
	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			/* no voltage drop: drive the GPIO the opposite way */
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	} else if (voltage->type == VOLTAGE_VDDC)
		radeon_atom_set_voltage(rdev, voltage->vddc_id);

	/* Reduced-power sclk high/low pulse lengths: clear both fields,
	 * then program the divider this state asks for (2, 4, or the
	 * default of 1 when reduced-speed sclk is disabled). */
	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
		}
	} else {
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
	}
	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);

	/* Dynamic voltage drop tied to sclk; optionally synchronized with
	 * the state's voltage delay. */
	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
		} else
			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);

	/* HDP clock gating: force the clock on unless this state allows
	 * dynamic gating of the HDP block. */
	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		hdp_dyn_cntl &= ~HDP_FORCEON;
	else
		hdp_dyn_cntl |= HDP_FORCEON;
	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
#if 0
	/* mc_host_dyn seems to cause hangs from time to time */
	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
	else
		mc_host_dyn_cntl |= MC_HOST_FORCEON;
	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
#endif
	/* backbias gating for the IO pads */
	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
	else
		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);

	/* set pcie lanes (discrete PCIE parts only, and only when the
	 * requested state differs from the current one) */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
	}
}
 | 192 |  | 
 | 193 | void rs600_pm_prepare(struct radeon_device *rdev) | 
 | 194 | { | 
 | 195 | 	struct drm_device *ddev = rdev->ddev; | 
 | 196 | 	struct drm_crtc *crtc; | 
 | 197 | 	struct radeon_crtc *radeon_crtc; | 
 | 198 | 	u32 tmp; | 
 | 199 |  | 
 | 200 | 	/* disable any active CRTCs */ | 
 | 201 | 	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { | 
 | 202 | 		radeon_crtc = to_radeon_crtc(crtc); | 
 | 203 | 		if (radeon_crtc->enabled) { | 
 | 204 | 			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset); | 
 | 205 | 			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE; | 
 | 206 | 			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); | 
 | 207 | 		} | 
 | 208 | 	} | 
 | 209 | } | 
 | 210 |  | 
 | 211 | void rs600_pm_finish(struct radeon_device *rdev) | 
 | 212 | { | 
 | 213 | 	struct drm_device *ddev = rdev->ddev; | 
 | 214 | 	struct drm_crtc *crtc; | 
 | 215 | 	struct radeon_crtc *radeon_crtc; | 
 | 216 | 	u32 tmp; | 
 | 217 |  | 
 | 218 | 	/* enable any active CRTCs */ | 
 | 219 | 	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { | 
 | 220 | 		radeon_crtc = to_radeon_crtc(crtc); | 
 | 221 | 		if (radeon_crtc->enabled) { | 
 | 222 | 			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset); | 
 | 223 | 			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE; | 
 | 224 | 			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); | 
 | 225 | 		} | 
 | 226 | 	} | 
 | 227 | } | 
 | 228 |  | 
| Alex Deucher | dcfdd40 | 2009-12-04 15:04:19 -0500 | [diff] [blame] | 229 | /* hpd for digital panel detect/disconnect */ | 
 | 230 | bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) | 
 | 231 | { | 
 | 232 | 	u32 tmp; | 
 | 233 | 	bool connected = false; | 
 | 234 |  | 
 | 235 | 	switch (hpd) { | 
 | 236 | 	case RADEON_HPD_1: | 
 | 237 | 		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS); | 
 | 238 | 		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp)) | 
 | 239 | 			connected = true; | 
 | 240 | 		break; | 
 | 241 | 	case RADEON_HPD_2: | 
 | 242 | 		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS); | 
 | 243 | 		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp)) | 
 | 244 | 			connected = true; | 
 | 245 | 		break; | 
 | 246 | 	default: | 
 | 247 | 		break; | 
 | 248 | 	} | 
 | 249 | 	return connected; | 
 | 250 | } | 
 | 251 |  | 
 | 252 | void rs600_hpd_set_polarity(struct radeon_device *rdev, | 
 | 253 | 			    enum radeon_hpd_id hpd) | 
 | 254 | { | 
 | 255 | 	u32 tmp; | 
 | 256 | 	bool connected = rs600_hpd_sense(rdev, hpd); | 
 | 257 |  | 
 | 258 | 	switch (hpd) { | 
 | 259 | 	case RADEON_HPD_1: | 
 | 260 | 		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); | 
 | 261 | 		if (connected) | 
 | 262 | 			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1); | 
 | 263 | 		else | 
 | 264 | 			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1); | 
 | 265 | 		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); | 
 | 266 | 		break; | 
 | 267 | 	case RADEON_HPD_2: | 
 | 268 | 		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); | 
 | 269 | 		if (connected) | 
 | 270 | 			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1); | 
 | 271 | 		else | 
 | 272 | 			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1); | 
 | 273 | 		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); | 
 | 274 | 		break; | 
 | 275 | 	default: | 
 | 276 | 		break; | 
 | 277 | 	} | 
 | 278 | } | 
 | 279 |  | 
 | 280 | void rs600_hpd_init(struct radeon_device *rdev) | 
 | 281 | { | 
 | 282 | 	struct drm_device *dev = rdev->ddev; | 
 | 283 | 	struct drm_connector *connector; | 
 | 284 |  | 
 | 285 | 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 
 | 286 | 		struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 
 | 287 | 		switch (radeon_connector->hpd.hpd) { | 
 | 288 | 		case RADEON_HPD_1: | 
 | 289 | 			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL, | 
 | 290 | 			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1)); | 
 | 291 | 			rdev->irq.hpd[0] = true; | 
 | 292 | 			break; | 
 | 293 | 		case RADEON_HPD_2: | 
 | 294 | 			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL, | 
 | 295 | 			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1)); | 
 | 296 | 			rdev->irq.hpd[1] = true; | 
 | 297 | 			break; | 
 | 298 | 		default: | 
 | 299 | 			break; | 
 | 300 | 		} | 
 | 301 | 	} | 
| Jerome Glisse | 003e69f | 2010-01-07 15:39:14 +0100 | [diff] [blame] | 302 | 	if (rdev->irq.installed) | 
 | 303 | 		rs600_irq_set(rdev); | 
| Alex Deucher | dcfdd40 | 2009-12-04 15:04:19 -0500 | [diff] [blame] | 304 | } | 
 | 305 |  | 
 | 306 | void rs600_hpd_fini(struct radeon_device *rdev) | 
 | 307 | { | 
 | 308 | 	struct drm_device *dev = rdev->ddev; | 
 | 309 | 	struct drm_connector *connector; | 
 | 310 |  | 
 | 311 | 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 
 | 312 | 		struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 
 | 313 | 		switch (radeon_connector->hpd.hpd) { | 
 | 314 | 		case RADEON_HPD_1: | 
 | 315 | 			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL, | 
 | 316 | 			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0)); | 
 | 317 | 			rdev->irq.hpd[0] = false; | 
 | 318 | 			break; | 
 | 319 | 		case RADEON_HPD_2: | 
 | 320 | 			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL, | 
 | 321 | 			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0)); | 
 | 322 | 			rdev->irq.hpd[1] = false; | 
 | 323 | 			break; | 
 | 324 | 		default: | 
 | 325 | 			break; | 
 | 326 | 		} | 
 | 327 | 	} | 
 | 328 | } | 
 | 329 |  | 
| Jerome Glisse | 90aca4d | 2010-03-09 14:45:12 +0000 | [diff] [blame] | 330 | void rs600_bm_disable(struct radeon_device *rdev) | 
 | 331 | { | 
 | 332 | 	u32 tmp; | 
 | 333 |  | 
 | 334 | 	/* disable bus mastering */ | 
 | 335 | 	pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp); | 
 | 336 | 	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB); | 
 | 337 | 	mdelay(1); | 
 | 338 | } | 
 | 339 |  | 
/* Soft-reset the GPU after a lockup: stop the MC clients and CP, then
 * pulse the GA+VAP, CP and MC soft-reset bits in turn with settle
 * delays between each stage.
 * Returns 0 when the GPU ends up idle (or was already idle), -1 and
 * sets rdev->gpu_lockup when GA/VAP are still busy afterwards. */
int rs600_asic_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 status, tmp;
	int ret = 0;

	/* nothing to do if the graphics engine is already idle */
	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP: halt the command stream and park the ring read/write
	 * pointers at 0 so the CP restarts cleanly */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	/* save PCI config space; it is restored after the resets below */
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	rs600_bm_disable(rdev);
	/* reset GA+VAP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset CP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset MC */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		rdev->gpu_lockup = true;
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeed\n");
	rv515_mc_resume(rdev, &save);
	return ret;
}
 | 401 |  | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 402 | /* | 
 | 403 |  * GART. | 
 | 404 |  */ | 
/* Flush the GART L1 TLBs and L2 cache by pulsing the invalidate bits
 * in MC_PT0_CNTL: deassert, assert, deassert.  The exact ordering of
 * these read-modify-write cycles is the behavior; do not reorder. */
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* make sure both invalidate bits start out deasserted */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	/* assert both invalidate bits to trigger the flush */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	/* deassert again; the final read presumably posts the write —
	 * NOTE(review): confirm against the MC register spec */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}
 | 422 |  | 
| Jerome Glisse | 4aac047 | 2009-09-14 18:29:49 +0200 | [diff] [blame] | 423 | int rs600_gart_init(struct radeon_device *rdev) | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 424 | { | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 425 | 	int r; | 
 | 426 |  | 
| Jerome Glisse | 4aac047 | 2009-09-14 18:29:49 +0200 | [diff] [blame] | 427 | 	if (rdev->gart.table.vram.robj) { | 
| Joe Perches | fce7d61 | 2010-10-30 21:08:30 +0000 | [diff] [blame] | 428 | 		WARN(1, "RS600 GART already initialized\n"); | 
| Jerome Glisse | 4aac047 | 2009-09-14 18:29:49 +0200 | [diff] [blame] | 429 | 		return 0; | 
 | 430 | 	} | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 431 | 	/* Initialize common gart structure */ | 
 | 432 | 	r = radeon_gart_init(rdev); | 
 | 433 | 	if (r) { | 
 | 434 | 		return r; | 
 | 435 | 	} | 
 | 436 | 	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8; | 
| Jerome Glisse | 4aac047 | 2009-09-14 18:29:49 +0200 | [diff] [blame] | 437 | 	return radeon_gart_table_vram_alloc(rdev); | 
 | 438 | } | 
 | 439 |  | 
/* Enable the RS600 GART: pin the page table in VRAM, program the MC
 * page-table registers (flat single-level table covering the GTT
 * aperture) and turn translation on.
 * Returns 0 on success, -EINVAL if no table BO was allocated, or the
 * error from pinning the table. */
int rs600_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* re-write any PTEs that were bound before the table existed */
	radeon_gart_restore(rdev);
	/* Enable bus master */
	tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
	WREG32(R_00004C_BUS_CNTL, tmp);
	/* FIXME: setup default page */
	WREG32_MC(R_000100_MC_PT0_CNTL,
		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));

	/* program all 19 MC client control registers identically:
	 * translation override on, non-system accesses pass through */
	for (i = 0; i < 19; i++) {
		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
	}
	/* enable first context */
	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
		  S_000102_ENABLE_PAGE_TABLE(1) |
		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));

	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);

	/* setup the page table: base address of the table in VRAM and
	 * the GPU address range it translates */
	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		  rdev->gart.table_addr);
	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

	/* System context maps to VRAM space */
	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);

	/* enable page tables, then flush so the MC sees the new table */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
	rs600_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
 | 501 |  | 
 | 502 | void rs600_gart_disable(struct radeon_device *rdev) | 
 | 503 | { | 
| Jerome Glisse | 4c78867 | 2009-11-20 14:29:23 +0100 | [diff] [blame] | 504 | 	u32 tmp; | 
 | 505 | 	int r; | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 506 |  | 
 | 507 | 	/* FIXME: disable out of gart access */ | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 508 | 	WREG32_MC(R_000100_MC_PT0_CNTL, 0); | 
 | 509 | 	tmp = RREG32_MC(R_000009_MC_CNTL1); | 
 | 510 | 	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); | 
| Jerome Glisse | 4aac047 | 2009-09-14 18:29:49 +0200 | [diff] [blame] | 511 | 	if (rdev->gart.table.vram.robj) { | 
| Jerome Glisse | 4c78867 | 2009-11-20 14:29:23 +0100 | [diff] [blame] | 512 | 		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); | 
 | 513 | 		if (r == 0) { | 
 | 514 | 			radeon_bo_kunmap(rdev->gart.table.vram.robj); | 
 | 515 | 			radeon_bo_unpin(rdev->gart.table.vram.robj); | 
 | 516 | 			radeon_bo_unreserve(rdev->gart.table.vram.robj); | 
 | 517 | 		} | 
| Jerome Glisse | 4aac047 | 2009-09-14 18:29:49 +0200 | [diff] [blame] | 518 | 	} | 
 | 519 | } | 
 | 520 |  | 
/* Tear down the GART: release the common GART structure, turn the MC
 * translation off and unpin the table, then free the table BO. */
void rs600_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs600_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
 | 527 |  | 
/* GART page-table-entry flag bits; the RS600 GART uses the R600 PTE
 * layout (see the comment at the top of this file). */
#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
 | 533 |  | 
 | 534 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 
 | 535 | { | 
 | 536 | 	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; | 
 | 537 |  | 
 | 538 | 	if (i < 0 || i > rdev->gart.num_gpu_pages) { | 
 | 539 | 		return -EINVAL; | 
 | 540 | 	} | 
 | 541 | 	addr = addr & 0xFFFFFFFFFFFFF000ULL; | 
 | 542 | 	addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; | 
 | 543 | 	addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE; | 
 | 544 | 	writeq(addr, ((void __iomem *)ptr) + (i * 8)); | 
 | 545 | 	return 0; | 
 | 546 | } | 
 | 547 |  | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 548 | int rs600_irq_set(struct radeon_device *rdev) | 
 | 549 | { | 
 | 550 | 	uint32_t tmp = 0; | 
 | 551 | 	uint32_t mode_int = 0; | 
| Alex Deucher | dcfdd40 | 2009-12-04 15:04:19 -0500 | [diff] [blame] | 552 | 	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) & | 
 | 553 | 		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1); | 
 | 554 | 	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) & | 
 | 555 | 		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 556 |  | 
| Jerome Glisse | 003e69f | 2010-01-07 15:39:14 +0100 | [diff] [blame] | 557 | 	if (!rdev->irq.installed) { | 
| Joe Perches | fce7d61 | 2010-10-30 21:08:30 +0000 | [diff] [blame] | 558 | 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); | 
| Jerome Glisse | 003e69f | 2010-01-07 15:39:14 +0100 | [diff] [blame] | 559 | 		WREG32(R_000040_GEN_INT_CNTL, 0); | 
 | 560 | 		return -EINVAL; | 
 | 561 | 	} | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 562 | 	if (rdev->irq.sw_int) { | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 563 | 		tmp |= S_000040_SW_INT_EN(1); | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 564 | 	} | 
| Alex Deucher | 2031f77 | 2010-04-22 12:52:11 -0400 | [diff] [blame] | 565 | 	if (rdev->irq.gui_idle) { | 
 | 566 | 		tmp |= S_000040_GUI_IDLE(1); | 
 | 567 | 	} | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 568 | 	if (rdev->irq.crtc_vblank_int[0] || | 
 | 569 | 	    rdev->irq.pflip[0]) { | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 570 | 		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 571 | 	} | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 572 | 	if (rdev->irq.crtc_vblank_int[1] || | 
 | 573 | 	    rdev->irq.pflip[1]) { | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 574 | 		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 575 | 	} | 
| Alex Deucher | dcfdd40 | 2009-12-04 15:04:19 -0500 | [diff] [blame] | 576 | 	if (rdev->irq.hpd[0]) { | 
 | 577 | 		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1); | 
 | 578 | 	} | 
 | 579 | 	if (rdev->irq.hpd[1]) { | 
 | 580 | 		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); | 
 | 581 | 	} | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 582 | 	WREG32(R_000040_GEN_INT_CNTL, tmp); | 
 | 583 | 	WREG32(R_006540_DxMODE_INT_MASK, mode_int); | 
| Alex Deucher | dcfdd40 | 2009-12-04 15:04:19 -0500 | [diff] [blame] | 584 | 	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); | 
 | 585 | 	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 586 | 	return 0; | 
 | 587 | } | 
 | 588 |  | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 589 | static inline u32 rs600_irq_ack(struct radeon_device *rdev) | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 590 | { | 
| Jerome Glisse | 01ceae8 | 2009-10-07 11:08:22 +0200 | [diff] [blame] | 591 | 	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); | 
| Alex Deucher | 2031f77 | 2010-04-22 12:52:11 -0400 | [diff] [blame] | 592 | 	uint32_t irq_mask = S_000044_SW_INT(1); | 
| Alex Deucher | dcfdd40 | 2009-12-04 15:04:19 -0500 | [diff] [blame] | 593 | 	u32 tmp; | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 594 |  | 
| Alex Deucher | 2031f77 | 2010-04-22 12:52:11 -0400 | [diff] [blame] | 595 | 	/* the interrupt works, but the status bit is permanently asserted */ | 
 | 596 | 	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) { | 
 | 597 | 		if (!rdev->irq.gui_idle_acked) | 
 | 598 | 			irq_mask |= S_000044_GUI_IDLE_STAT(1); | 
 | 599 | 	} | 
 | 600 |  | 
| Jerome Glisse | 01ceae8 | 2009-10-07 11:08:22 +0200 | [diff] [blame] | 601 | 	if (G_000044_DISPLAY_INT_STAT(irqs)) { | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 602 | 		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); | 
 | 603 | 		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 604 | 			WREG32(R_006534_D1MODE_VBLANK_STATUS, | 
 | 605 | 				S_006534_D1MODE_VBLANK_ACK(1)); | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 606 | 		} | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 607 | 		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 608 | 			WREG32(R_006D34_D2MODE_VBLANK_STATUS, | 
 | 609 | 				S_006D34_D2MODE_VBLANK_ACK(1)); | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 610 | 		} | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 611 | 		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { | 
| Alex Deucher | dcfdd40 | 2009-12-04 15:04:19 -0500 | [diff] [blame] | 612 | 			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); | 
 | 613 | 			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1); | 
 | 614 | 			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); | 
 | 615 | 		} | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 616 | 		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { | 
| Alex Deucher | dcfdd40 | 2009-12-04 15:04:19 -0500 | [diff] [blame] | 617 | 			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); | 
 | 618 | 			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1); | 
 | 619 | 			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); | 
 | 620 | 		} | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 621 | 	} else { | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 622 | 		rdev->irq.stat_regs.r500.disp_int = 0; | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 623 | 	} | 
 | 624 |  | 
 | 625 | 	if (irqs) { | 
| Jerome Glisse | 01ceae8 | 2009-10-07 11:08:22 +0200 | [diff] [blame] | 626 | 		WREG32(R_000044_GEN_INT_STATUS, irqs); | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 627 | 	} | 
 | 628 | 	return irqs & irq_mask; | 
 | 629 | } | 
 | 630 |  | 
| Jerome Glisse | ac447df | 2009-09-30 22:18:43 +0200 | [diff] [blame] | 631 | void rs600_irq_disable(struct radeon_device *rdev) | 
 | 632 | { | 
| Jerome Glisse | ac447df | 2009-09-30 22:18:43 +0200 | [diff] [blame] | 633 | 	WREG32(R_000040_GEN_INT_CNTL, 0); | 
 | 634 | 	WREG32(R_006540_DxMODE_INT_MASK, 0); | 
 | 635 | 	/* Wait and acknowledge irq */ | 
 | 636 | 	mdelay(1); | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 637 | 	rs600_irq_ack(rdev); | 
| Jerome Glisse | ac447df | 2009-09-30 22:18:43 +0200 | [diff] [blame] | 638 | } | 
 | 639 |  | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 640 | int rs600_irq_process(struct radeon_device *rdev) | 
 | 641 | { | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 642 | 	u32 status, msi_rearm; | 
| Alex Deucher | d4877cf | 2009-12-04 16:56:37 -0500 | [diff] [blame] | 643 | 	bool queue_hotplug = false; | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 644 |  | 
| Alex Deucher | 2031f77 | 2010-04-22 12:52:11 -0400 | [diff] [blame] | 645 | 	/* reset gui idle ack.  the status bit is broken */ | 
 | 646 | 	rdev->irq.gui_idle_acked = false; | 
 | 647 |  | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 648 | 	status = rs600_irq_ack(rdev); | 
 | 649 | 	if (!status && !rdev->irq.stat_regs.r500.disp_int) { | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 650 | 		return IRQ_NONE; | 
 | 651 | 	} | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 652 | 	while (status || rdev->irq.stat_regs.r500.disp_int) { | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 653 | 		/* SW interrupt */ | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 654 | 		if (G_000044_SW_INT(status)) { | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 655 | 			radeon_fence_process(rdev); | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 656 | 		} | 
| Alex Deucher | 2031f77 | 2010-04-22 12:52:11 -0400 | [diff] [blame] | 657 | 		/* GUI idle */ | 
 | 658 | 		if (G_000040_GUI_IDLE(status)) { | 
 | 659 | 			rdev->irq.gui_idle_acked = true; | 
 | 660 | 			rdev->pm.gui_idle = true; | 
 | 661 | 			wake_up(&rdev->irq.idle_queue); | 
 | 662 | 		} | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 663 | 		/* Vertical blank interrupts */ | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 664 | 		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 665 | 			if (rdev->irq.crtc_vblank_int[0]) { | 
 | 666 | 				drm_handle_vblank(rdev->ddev, 0); | 
 | 667 | 				rdev->pm.vblank_sync = true; | 
 | 668 | 				wake_up(&rdev->irq.vblank_queue); | 
 | 669 | 			} | 
| Mario Kleiner | 3e4ea74 | 2010-11-21 10:59:02 -0500 | [diff] [blame] | 670 | 			if (rdev->irq.pflip[0]) | 
 | 671 | 				radeon_crtc_handle_flip(rdev, 0); | 
| Rafał Miłecki | c913e23 | 2009-12-22 23:02:16 +0100 | [diff] [blame] | 672 | 		} | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 673 | 		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 674 | 			if (rdev->irq.crtc_vblank_int[1]) { | 
 | 675 | 				drm_handle_vblank(rdev->ddev, 1); | 
 | 676 | 				rdev->pm.vblank_sync = true; | 
 | 677 | 				wake_up(&rdev->irq.vblank_queue); | 
 | 678 | 			} | 
| Mario Kleiner | 3e4ea74 | 2010-11-21 10:59:02 -0500 | [diff] [blame] | 679 | 			if (rdev->irq.pflip[1]) | 
 | 680 | 				radeon_crtc_handle_flip(rdev, 1); | 
| Rafał Miłecki | c913e23 | 2009-12-22 23:02:16 +0100 | [diff] [blame] | 681 | 		} | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 682 | 		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { | 
| Alex Deucher | d4877cf | 2009-12-04 16:56:37 -0500 | [diff] [blame] | 683 | 			queue_hotplug = true; | 
 | 684 | 			DRM_DEBUG("HPD1\n"); | 
| Alex Deucher | dcfdd40 | 2009-12-04 15:04:19 -0500 | [diff] [blame] | 685 | 		} | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 686 | 		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { | 
| Alex Deucher | d4877cf | 2009-12-04 16:56:37 -0500 | [diff] [blame] | 687 | 			queue_hotplug = true; | 
 | 688 | 			DRM_DEBUG("HPD2\n"); | 
| Alex Deucher | dcfdd40 | 2009-12-04 15:04:19 -0500 | [diff] [blame] | 689 | 		} | 
| Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 690 | 		status = rs600_irq_ack(rdev); | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 691 | 	} | 
| Alex Deucher | 2031f77 | 2010-04-22 12:52:11 -0400 | [diff] [blame] | 692 | 	/* reset gui idle ack.  the status bit is broken */ | 
 | 693 | 	rdev->irq.gui_idle_acked = false; | 
| Alex Deucher | d4877cf | 2009-12-04 16:56:37 -0500 | [diff] [blame] | 694 | 	if (queue_hotplug) | 
| Tejun Heo | 32c87fc | 2011-01-03 14:49:32 +0100 | [diff] [blame] | 695 | 		schedule_work(&rdev->hotplug_work); | 
| Alex Deucher | 3e5cb98 | 2009-10-16 12:21:24 -0400 | [diff] [blame] | 696 | 	if (rdev->msi_enabled) { | 
 | 697 | 		switch (rdev->family) { | 
 | 698 | 		case CHIP_RS600: | 
 | 699 | 		case CHIP_RS690: | 
 | 700 | 		case CHIP_RS740: | 
 | 701 | 			msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM; | 
 | 702 | 			WREG32(RADEON_BUS_CNTL, msi_rearm); | 
 | 703 | 			WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM); | 
 | 704 | 			break; | 
 | 705 | 		default: | 
 | 706 | 			msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN; | 
 | 707 | 			WREG32(RADEON_MSI_REARM_EN, msi_rearm); | 
 | 708 | 			WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN); | 
 | 709 | 			break; | 
 | 710 | 		} | 
 | 711 | 	} | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 712 | 	return IRQ_HANDLED; | 
 | 713 | } | 
 | 714 |  | 
 | 715 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) | 
 | 716 | { | 
 | 717 | 	if (crtc == 0) | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 718 | 		return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT); | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 719 | 	else | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 720 | 		return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT); | 
| Michel Dänzer | 7ed220d | 2009-08-13 11:10:51 +0200 | [diff] [blame] | 721 | } | 
 | 722 |  | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 723 | int rs600_mc_wait_for_idle(struct radeon_device *rdev) | 
 | 724 | { | 
 | 725 | 	unsigned i; | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 726 |  | 
 | 727 | 	for (i = 0; i < rdev->usec_timeout; i++) { | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 728 | 		if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS))) | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 729 | 			return 0; | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 730 | 		udelay(1); | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 731 | 	} | 
 | 732 | 	return -1; | 
 | 733 | } | 
 | 734 |  | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 735 | void rs600_gpu_init(struct radeon_device *rdev) | 
 | 736 | { | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 737 | 	r420_pipes_init(rdev); | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 738 | 	/* Wait for mc idle */ | 
 | 739 | 	if (rs600_mc_wait_for_idle(rdev)) | 
 | 740 | 		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 741 | } | 
 | 742 |  | 
| Jerome Glisse | d594e46 | 2010-02-17 21:54:29 +0000 | [diff] [blame] | 743 | void rs600_mc_init(struct radeon_device *rdev) | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 744 | { | 
| Jerome Glisse | d594e46 | 2010-02-17 21:54:29 +0000 | [diff] [blame] | 745 | 	u64 base; | 
 | 746 |  | 
| Jordan Crouse | 01d73a6 | 2010-05-27 13:40:24 -0600 | [diff] [blame] | 747 | 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); | 
 | 748 | 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 749 | 	rdev->mc.vram_is_ddr = true; | 
 | 750 | 	rdev->mc.vram_width = 128; | 
| Alex Deucher | 722f294 | 2009-12-03 16:18:19 -0500 | [diff] [blame] | 751 | 	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | 
 | 752 | 	rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | 
| Jerome Glisse | 51e5fcd | 2010-02-19 14:33:54 +0000 | [diff] [blame] | 753 | 	rdev->mc.visible_vram_size = rdev->mc.aper_size; | 
| Jerome Glisse | d594e46 | 2010-02-17 21:54:29 +0000 | [diff] [blame] | 754 | 	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 
 | 755 | 	base = RREG32_MC(R_000004_MC_FB_LOCATION); | 
 | 756 | 	base = G_000004_MC_FB_START(base) << 16; | 
 | 757 | 	radeon_vram_location(rdev, &rdev->mc, base); | 
| Alex Deucher | 8d369bb | 2010-07-15 10:51:10 -0400 | [diff] [blame] | 758 | 	rdev->mc.gtt_base_align = 0; | 
| Jerome Glisse | d594e46 | 2010-02-17 21:54:29 +0000 | [diff] [blame] | 759 | 	radeon_gtt_location(rdev, &rdev->mc); | 
| Alex Deucher | f47299c | 2010-03-16 20:54:38 -0400 | [diff] [blame] | 760 | 	radeon_update_bandwidth_info(rdev); | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 761 | } | 
 | 762 |  | 
| Jerome Glisse | c93bb85 | 2009-07-13 21:04:08 +0200 | [diff] [blame] | 763 | void rs600_bandwidth_update(struct radeon_device *rdev) | 
 | 764 | { | 
| Alex Deucher | f46c012 | 2010-03-31 00:33:27 -0400 | [diff] [blame] | 765 | 	struct drm_display_mode *mode0 = NULL; | 
 | 766 | 	struct drm_display_mode *mode1 = NULL; | 
 | 767 | 	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt; | 
 | 768 | 	/* FIXME: implement full support */ | 
 | 769 |  | 
 | 770 | 	radeon_update_display_priority(rdev); | 
 | 771 |  | 
 | 772 | 	if (rdev->mode_info.crtcs[0]->base.enabled) | 
 | 773 | 		mode0 = &rdev->mode_info.crtcs[0]->base.mode; | 
 | 774 | 	if (rdev->mode_info.crtcs[1]->base.enabled) | 
 | 775 | 		mode1 = &rdev->mode_info.crtcs[1]->base.mode; | 
 | 776 |  | 
 | 777 | 	rs690_line_buffer_adjust(rdev, mode0, mode1); | 
 | 778 |  | 
 | 779 | 	if (rdev->disp_priority == 2) { | 
 | 780 | 		d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT); | 
 | 781 | 		d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT); | 
 | 782 | 		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); | 
 | 783 | 		d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); | 
 | 784 | 		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); | 
 | 785 | 		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); | 
 | 786 | 		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); | 
 | 787 | 		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); | 
 | 788 | 	} | 
| Jerome Glisse | c93bb85 | 2009-07-13 21:04:08 +0200 | [diff] [blame] | 789 | } | 
 | 790 |  | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 791 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 
 | 792 | { | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 793 | 	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | | 
 | 794 | 		S_000070_MC_IND_CITF_ARB0(1)); | 
 | 795 | 	return RREG32(R_000074_MC_IND_DATA); | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 796 | } | 
 | 797 |  | 
/* Write an indirect MC register: select the address (with write
 * enable) through MC_IND_INDEX, then write the value through
 * MC_IND_DATA.  The index write must precede the data write. */
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
		S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
	WREG32(R_000074_MC_IND_DATA, v);
}
 | 804 |  | 
 | 805 | void rs600_debugfs(struct radeon_device *rdev) | 
 | 806 | { | 
 | 807 | 	if (r100_debugfs_rbbm_init(rdev)) | 
 | 808 | 		DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | 
| Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 809 | } | 
| Dave Airlie | 3f7dc91a | 2009-08-27 11:10:15 +1000 | [diff] [blame] | 810 |  | 
| Jerome Glisse | 3bc6853 | 2009-10-01 09:39:24 +0200 | [diff] [blame] | 811 | void rs600_set_safe_registers(struct radeon_device *rdev) | 
| Dave Airlie | 3f7dc91a | 2009-08-27 11:10:15 +1000 | [diff] [blame] | 812 | { | 
 | 813 | 	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm; | 
 | 814 | 	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); | 
| Jerome Glisse | 3bc6853 | 2009-10-01 09:39:24 +0200 | [diff] [blame] | 815 | } | 
 | 816 |  | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 817 | static void rs600_mc_program(struct radeon_device *rdev) | 
 | 818 | { | 
 | 819 | 	struct rv515_mc_save save; | 
 | 820 |  | 
 | 821 | 	/* Stops all mc clients */ | 
 | 822 | 	rv515_mc_stop(rdev, &save); | 
 | 823 |  | 
 | 824 | 	/* Wait for mc idle */ | 
 | 825 | 	if (rs600_mc_wait_for_idle(rdev)) | 
 | 826 | 		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); | 
 | 827 |  | 
 | 828 | 	/* FIXME: What does AGP means for such chipset ? */ | 
 | 829 | 	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF); | 
 | 830 | 	WREG32_MC(R_000006_AGP_BASE, 0); | 
 | 831 | 	WREG32_MC(R_000007_AGP_BASE_2, 0); | 
 | 832 | 	/* Program MC */ | 
 | 833 | 	WREG32_MC(R_000004_MC_FB_LOCATION, | 
 | 834 | 			S_000004_MC_FB_START(rdev->mc.vram_start >> 16) | | 
 | 835 | 			S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16)); | 
 | 836 | 	WREG32(R_000134_HDP_FB_LOCATION, | 
 | 837 | 		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); | 
 | 838 |  | 
 | 839 | 	rv515_mc_resume(rdev, &save); | 
 | 840 | } | 
 | 841 |  | 
/* Bring the GPU up: program the MC, restart clocks, configure pipes,
 * enable GART, writeback, interrupts, the CP ring, IB handling and
 * audio, in that order.  Returns 0 on success or a negative errno. */
static int rs600_startup(struct radeon_device *rdev)
{
	int r;

	rs600_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs600_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs600_gart_enable(rdev);
	if (r)
		return r;

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = r100_ib_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing audio\n");
		return r;
	}

	return 0;
}
 | 885 |  | 
/* Resume from suspend: disable GART, reset and re-post the ASIC via
 * ATOM, then run the normal startup sequence. */
int rs600_resume(struct radeon_device *rdev)
{
	/* Make sure GART is not working */
	rs600_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return rs600_startup(rdev);
}
 | 906 |  | 
/* Suspend: tear down audio, CP, writeback, interrupts and GART, in
 * the reverse order of startup.  Always returns 0. */
int rs600_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	rs600_gart_disable(rdev);
	return 0;
}
 | 916 |  | 
/* Final driver teardown: release every subsystem initialized by
 * rs600_init()/rs600_startup() and free the cached BIOS image. */
void rs600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	rs600_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
 | 932 |  | 
| Jerome Glisse | 3bc6853 | 2009-10-01 09:39:24 +0200 | [diff] [blame] | 933 | int rs600_init(struct radeon_device *rdev) | 
 | 934 | { | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 935 | 	int r; | 
 | 936 |  | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 937 | 	/* Disable VGA */ | 
 | 938 | 	rv515_vga_render_disable(rdev); | 
 | 939 | 	/* Initialize scratch registers */ | 
 | 940 | 	radeon_scratch_init(rdev); | 
 | 941 | 	/* Initialize surface registers */ | 
 | 942 | 	radeon_surface_init(rdev); | 
| Dave Airlie | 4c712e6 | 2010-07-15 12:13:50 +1000 | [diff] [blame] | 943 | 	/* restore some register to sane defaults */ | 
 | 944 | 	r100_restore_sanity(rdev); | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 945 | 	/* BIOS */ | 
 | 946 | 	if (!radeon_get_bios(rdev)) { | 
 | 947 | 		if (ASIC_IS_AVIVO(rdev)) | 
 | 948 | 			return -EINVAL; | 
 | 949 | 	} | 
 | 950 | 	if (rdev->is_atom_bios) { | 
 | 951 | 		r = radeon_atombios_init(rdev); | 
 | 952 | 		if (r) | 
 | 953 | 			return r; | 
 | 954 | 	} else { | 
 | 955 | 		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n"); | 
 | 956 | 		return -EINVAL; | 
 | 957 | 	} | 
 | 958 | 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */ | 
| Jerome Glisse | a2d07b7 | 2010-03-09 14:45:11 +0000 | [diff] [blame] | 959 | 	if (radeon_asic_reset(rdev)) { | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 960 | 		dev_warn(rdev->dev, | 
 | 961 | 			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | 
 | 962 | 			RREG32(R_000E40_RBBM_STATUS), | 
 | 963 | 			RREG32(R_0007C0_CP_STAT)); | 
 | 964 | 	} | 
 | 965 | 	/* check if cards are posted or not */ | 
| Dave Airlie | 72542d7 | 2009-12-01 14:06:31 +1000 | [diff] [blame] | 966 | 	if (radeon_boot_test_post_card(rdev) == false) | 
 | 967 | 		return -EINVAL; | 
 | 968 |  | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 969 | 	/* Initialize clocks */ | 
 | 970 | 	radeon_get_clock_info(rdev->ddev); | 
| Jerome Glisse | d594e46 | 2010-02-17 21:54:29 +0000 | [diff] [blame] | 971 | 	/* initialize memory controller */ | 
 | 972 | 	rs600_mc_init(rdev); | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 973 | 	rs600_debugfs(rdev); | 
 | 974 | 	/* Fence driver */ | 
 | 975 | 	r = radeon_fence_driver_init(rdev); | 
 | 976 | 	if (r) | 
 | 977 | 		return r; | 
 | 978 | 	r = radeon_irq_kms_init(rdev); | 
 | 979 | 	if (r) | 
 | 980 | 		return r; | 
 | 981 | 	/* Memory manager */ | 
| Jerome Glisse | 4c78867 | 2009-11-20 14:29:23 +0100 | [diff] [blame] | 982 | 	r = radeon_bo_init(rdev); | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 983 | 	if (r) | 
 | 984 | 		return r; | 
 | 985 | 	r = rs600_gart_init(rdev); | 
 | 986 | 	if (r) | 
 | 987 | 		return r; | 
 | 988 | 	rs600_set_safe_registers(rdev); | 
 | 989 | 	rdev->accel_working = true; | 
 | 990 | 	r = rs600_startup(rdev); | 
 | 991 | 	if (r) { | 
 | 992 | 		/* Somethings want wront with the accel init stop accel */ | 
 | 993 | 		dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 994 | 		r100_cp_fini(rdev); | 
| Alex Deucher | 724c80e | 2010-08-27 18:25:25 -0400 | [diff] [blame] | 995 | 		radeon_wb_fini(rdev); | 
| Jerome Glisse | c010f80 | 2009-09-30 22:09:06 +0200 | [diff] [blame] | 996 | 		r100_ib_fini(rdev); | 
 | 997 | 		rs600_gart_fini(rdev); | 
 | 998 | 		radeon_irq_kms_fini(rdev); | 
 | 999 | 		rdev->accel_working = false; | 
 | 1000 | 	} | 
| Dave Airlie | 3f7dc91a | 2009-08-27 11:10:15 +1000 | [diff] [blame] | 1001 | 	return 0; | 
 | 1002 | } |