/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs690d.h"

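/* Poll the memory controller until it reports idle, or give up after
 * rdev->usec_timeout microseconds.  Returns 0 when idle, -1 on timeout. */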
static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS);
		if (G_000090_MC_SYSTEM_IDLE(tmp))
			return 0;
		udelay(1);
	}
	return -1;
}

static void rs690_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: is this correct ? */
	r420_pipes_init(rdev);
	if (rs690_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2;
};

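/* Pull the integrated-system info table out of the ATOM BIOS and derive the
 * sideport/system memory clocks plus the HT link clock and width, then
 * compute the resulting K8, HT and sideport bandwidths.  All values are kept
 * in 20.12 fixed point (fixed20_12) via the dfixed_* helpers. */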
void rs690_pm_info(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *info;
	uint16_t data_offset;
	uint8_t frev, crev;
	fixed20_12 tmp;

	if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);

		/* Get various system information from the BIOS */
		switch (crev) {
		case 1:
			tmp.full = dfixed_const(100);
			rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
			if (info->info.usK8MemoryClock)
				rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
			else if (rdev->clock.default_mclk) {
				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
				rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
			} else
				rdev->pm.igp_system_mclk.full = dfixed_const(400);
			rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
			rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
			break;
		case 2:
			tmp.full = dfixed_const(100);
			rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
			if (info->info_v2.ulBootUpUMAClock)
				rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
			else if (rdev->clock.default_mclk)
				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
			else
				rdev->pm.igp_system_mclk.full = dfixed_const(66700);
			rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
			rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
			rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
			rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
			break;
		default:
			/* We assume the slowest possible clock, i.e. worst case */
			rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
			rdev->pm.igp_system_mclk.full = dfixed_const(200);
			rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
			rdev->pm.igp_ht_link_width.full = dfixed_const(8);
			DRM_ERROR("No integrated system info for your GPU, using safe default\n");
			break;
		}
	} else {
		/* We assume the slowest possible clock, i.e. worst case */
		rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
		rdev->pm.igp_system_mclk.full = dfixed_const(200);
		rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
		rdev->pm.igp_ht_link_width.full = dfixed_const(8);
		DRM_ERROR("No integrated system info for your GPU, using safe default\n");
	}
	/* Compute various bandwidths */
	/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4  */
	tmp.full = dfixed_const(4);
	rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
	/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
	 *              = ht_clk * ht_width / 5
	 */
	tmp.full = dfixed_const(5);
	rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
						rdev->pm.igp_ht_link_width);
	rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
	if (tmp.full < rdev->pm.max_bandwidth.full) {
		/* HT link is a limiting factor */
		rdev->pm.max_bandwidth.full = tmp.full;
	}
	/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
	 *                    = (sideport_clk * 14) / 10
	 */
	tmp.full = dfixed_const(14);
	rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
	tmp.full = dfixed_const(10);
	rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
}

void rs690_mc_init(struct radeon_device *rdev)
{
	u64 base;

	rs400_gart_adjust_size(rdev);
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
	base = G_000100_MC_FB_START(base) << 16;
	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	rs690_pm_info(rdev);
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

void rs690_line_buffer_adjust(struct radeon_device *rdev,
			      struct drm_display_mode *mode1,
			      struct drm_display_mode *mode2)
{
	u32 tmp;

	/*
	 * Line Buffer Setup
	 * There is a single line buffer shared by both display controllers.
	 * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning can either be done
	 * manually or via one of four preset allocations specified in bits 1:0:
	 *  0 - line buffer is divided in half and shared between the two crtcs
	 *  1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
	 *  2 - D1 gets the whole buffer
	 *  3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
	 * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT switches to manual
	 * allocation mode. In manual allocation mode, D1 always starts at 0,
	 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
	 */
	tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT;
	tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE;
	/* auto */
	if (mode1 && mode2) {
		if (mode1->hdisplay > mode2->hdisplay) {
			if (mode1->hdisplay > 2560)
				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
			else
				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else if (mode2->hdisplay > mode1->hdisplay) {
			if (mode2->hdisplay > 2560)
				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
			else
				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else
			tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
	} else if (mode1) {
		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
	} else if (mode2) {
		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
	}
	WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
}

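/* Per-CRTC display watermark parameters; every field except
 * lb_request_fifo_depth is 20.12 fixed point. */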
struct rs690_watermark {
	u32        lb_request_fifo_depth;
	fixed20_12 num_line_pair;
	fixed20_12 estimated_width;
	fixed20_12 worst_case_latency;
	fixed20_12 consumption_rate;
	fixed20_12 active_time;
	fixed20_12 dbpp;
	fixed20_12 priority_mark_max;
	fixed20_12 priority_mark;
	fixed20_12 sclk;
};

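/* Fill a rs690_watermark for one CRTC: estimate the display's memory
 * consumption rate, line/active time and worst-case request latency, and
 * from those derive the line-buffer request FIFO depth and the priority
 * marks that rs690_bandwidth_update() later programs into the hardware. */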
void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
				  struct radeon_crtc *crtc,
				  struct rs690_watermark *wm)
{
	struct drm_display_mode *mode = &crtc->base.mode;
	fixed20_12 a, b, c;
	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;

	if (!crtc->base.enabled) {
		/* FIXME: wouldn't it be better to set priority mark to maximum */
		wm->lb_request_fifo_depth = 4;
		return;
	}

	if (crtc->vsc.full > dfixed_const(2))
		wm->num_line_pair.full = dfixed_const(2);
	else
		wm->num_line_pair.full = dfixed_const(1);

	b.full = dfixed_const(mode->crtc_hdisplay);
	c.full = dfixed_const(256);
	a.full = dfixed_div(b, c);
	request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
	request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
	if (a.full < dfixed_const(4)) {
		wm->lb_request_fifo_depth = 4;
	} else {
		wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
	}

	/* Determine consumption rate
	 *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
	 *  vtaps = number of vertical taps,
	 *  vsc = vertical scaling ratio, defined as source/destination
	 *  hsc = horizontal scaling ratio, defined as source/destination
	 */
	a.full = dfixed_const(mode->clock);
	b.full = dfixed_const(1000);
	a.full = dfixed_div(a, b);
	pclk.full = dfixed_div(b, a);
	if (crtc->rmx_type != RMX_OFF) {
		b.full = dfixed_const(2);
		if (crtc->vsc.full > b.full)
			b.full = crtc->vsc.full;
		b.full = dfixed_mul(b, crtc->hsc);
		c.full = dfixed_const(2);
		b.full = dfixed_div(b, c);
		consumption_time.full = dfixed_div(pclk, b);
	} else {
		consumption_time.full = pclk.full;
	}
	a.full = dfixed_const(1);
	wm->consumption_rate.full = dfixed_div(a, consumption_time);


	/* Determine line time
	 *  LineTime = total time for one line of display
	 *  htotal = total number of horizontal pixels
	 *  pclk = pixel clock period(ns)
	 */
	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
	line_time.full = dfixed_mul(a, pclk);

	/* Determine active time
	 *  ActiveTime = time of active region of display within one line,
	 *  hactive = total number of horizontal active pixels
	 *  htotal = total number of horizontal pixels
	 */
	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
	b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->active_time.full = dfixed_mul(line_time, b);
	wm->active_time.full = dfixed_div(wm->active_time, a);

	/* Maximum bandwidth is the minimum bandwidth of all components */
	rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
	if (rdev->mc.igp_sideport_enabled) {
		if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
		    rdev->pm.sideport_bandwidth.full)
			rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
		read_delay_latency.full = dfixed_const(370 * 800 * 1000);
		read_delay_latency.full = dfixed_div(read_delay_latency,
						     rdev->pm.igp_sideport_mclk);
	} else {
		if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
		    rdev->pm.k8_bandwidth.full)
			rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth;
		if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
		    rdev->pm.ht_bandwidth.full)
			rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
		read_delay_latency.full = dfixed_const(5000);
	}

	/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
	a.full = dfixed_const(16);
	rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a);
	a.full = dfixed_const(1000);
	rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk);
	/* Determine chunk time
	 * ChunkTime = the time it takes the DCP to send one chunk of data
	 * to the LB which consists of pipeline delay and inter chunk gap
	 * sclk = system clock(ns)
	 */
	a.full = dfixed_const(256 * 13);
	chunk_time.full = dfixed_mul(rdev->pm.sclk, a);
	a.full = dfixed_const(10);
	chunk_time.full = dfixed_div(chunk_time, a);

	/* Determine the worst case latency
	 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
	 * WorstCaseLatency = worst case time from urgent to when the MC starts
	 *                    to return data
	 * READ_DELAY_IDLE_MAX = constant of 1us
	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
	 *             which consists of pipeline delay and inter chunk gap
	 */
	if (dfixed_trunc(wm->num_line_pair) > 1) {
		a.full = dfixed_const(3);
		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
		wm->worst_case_latency.full += read_delay_latency.full;
	} else {
		a.full = dfixed_const(2);
		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
		wm->worst_case_latency.full += read_delay_latency.full;
	}

	/* Determine the tolerable latency
	 * TolerableLatency = Any given request has only 1 line time
	 *                    for the data to be returned
	 * LBRequestFifoDepth = Number of chunk requests the LB can
	 *                      put into the request FIFO for a display
	 *  LineTime = total time for one line of display
	 *  ChunkTime = the time it takes the DCP to send one chunk
	 *              of data to the LB which consists of
	 *  pipeline delay and inter chunk gap
	 */
	if ((2 + wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
		tolerable_latency.full = line_time.full;
	} else {
		tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
		tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
		tolerable_latency.full = line_time.full - tolerable_latency.full;
	}
	/* We assume worst case 32bits (4 bytes) */
	wm->dbpp.full = dfixed_const(4 * 8);

	/* Determine the maximum priority mark
	 *  width = viewport width in pixels
	 */
	a.full = dfixed_const(16);
	wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
	wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);

	/* Determine estimated width */
	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
	estimated_width.full = dfixed_div(estimated_width, consumption_time);
	if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
		wm->priority_mark.full = dfixed_const(10);
	} else {
		a.full = dfixed_const(16);
		wm->priority_mark.full = dfixed_div(estimated_width, a);
		wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
	}
}

void rs690_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	struct rs690_watermark wm0;
	struct rs690_watermark wm1;
	u32 tmp;
	u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
	u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
	fixed20_12 priority_mark02, priority_mark12, fill_rate;
	fixed20_12 a, b;

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
	/*
	 * Set display0/1 priority up in the memory controller for
	 * modes if the user specifies HIGH for displaypriority
	 * option.
	 */
	if ((rdev->disp_priority == 2) &&
	    ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
		tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
		tmp &= C_000104_MC_DISP0R_INIT_LAT;
		tmp &= C_000104_MC_DISP1R_INIT_LAT;
		if (mode0)
			tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
		if (mode1)
			tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
		WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
	}
	rs690_line_buffer_adjust(rdev, mode0, mode1);

	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
		WREG32(R_006C9C_DCP_CONTROL, 0);
	if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
		WREG32(R_006C9C_DCP_CONTROL, 2);

	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);

	tmp = (wm0.lb_request_fifo_depth - 1);
	tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
	WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);

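	/* Derive the priority marks for the active CRTC(s).  Roughly: start
	 * from worst_case_latency * consumption_rate (plus the backlog built
	 * up while the consumption rate exceeds the fill rate), scale the
	 * result by 1 / (16 * 1000), and never drop below the per-CRTC
	 * priority_mark / priority_mark_max computed above. */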
	if (mode0 && mode1) {
		if (dfixed_trunc(wm0.dbpp) > 64)
			a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
		else
			a.full = wm0.num_line_pair.full;
		if (dfixed_trunc(wm1.dbpp) > 64)
			b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
		else
			b.full = wm1.num_line_pair.full;
		a.full += b.full;
		fill_rate.full = dfixed_div(wm0.sclk, a);
		if (wm0.consumption_rate.full > fill_rate.full) {
			b.full = wm0.consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm0.active_time);
			a.full = dfixed_mul(wm0.worst_case_latency,
					    wm0.consumption_rate);
			a.full = a.full + b.full;
			b.full = dfixed_const(16 * 1000);
			priority_mark02.full = dfixed_div(a, b);
		} else {
			a.full = dfixed_mul(wm0.worst_case_latency,
					    wm0.consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark02.full = dfixed_div(a, b);
		}
		if (wm1.consumption_rate.full > fill_rate.full) {
			b.full = wm1.consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm1.active_time);
			a.full = dfixed_mul(wm1.worst_case_latency,
					    wm1.consumption_rate);
			a.full = a.full + b.full;
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		} else {
			a.full = dfixed_mul(wm1.worst_case_latency,
					    wm1.consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		}
		if (wm0.priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark.full;
		if (dfixed_trunc(priority_mark02) < 0)
			priority_mark02.full = 0;
		if (wm0.priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark_max.full;
		if (wm1.priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark.full;
		if (dfixed_trunc(priority_mark12) < 0)
			priority_mark12.full = 0;
		if (wm1.priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark_max.full;
		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
		if (rdev->disp_priority == 2) {
			d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
			d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
		}
	} else if (mode0) {
		if (dfixed_trunc(wm0.dbpp) > 64)
			a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
		else
			a.full = wm0.num_line_pair.full;
		fill_rate.full = dfixed_div(wm0.sclk, a);
		if (wm0.consumption_rate.full > fill_rate.full) {
			b.full = wm0.consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm0.active_time);
			a.full = dfixed_mul(wm0.worst_case_latency,
					    wm0.consumption_rate);
			a.full = a.full + b.full;
			b.full = dfixed_const(16 * 1000);
			priority_mark02.full = dfixed_div(a, b);
		} else {
			a.full = dfixed_mul(wm0.worst_case_latency,
					    wm0.consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark02.full = dfixed_div(a, b);
		}
		if (wm0.priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark.full;
		if (dfixed_trunc(priority_mark02) < 0)
			priority_mark02.full = 0;
		if (wm0.priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark_max.full;
		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
		if (rdev->disp_priority == 2)
			d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
	} else if (mode1) {
		if (dfixed_trunc(wm1.dbpp) > 64)
			a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
		else
			a.full = wm1.num_line_pair.full;
		fill_rate.full = dfixed_div(wm1.sclk, a);
		if (wm1.consumption_rate.full > fill_rate.full) {
			b.full = wm1.consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm1.active_time);
			a.full = dfixed_mul(wm1.worst_case_latency,
					    wm1.consumption_rate);
			a.full = a.full + b.full;
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		} else {
			a.full = dfixed_mul(wm1.worst_case_latency,
					    wm1.consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		}
		if (wm1.priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark.full;
		if (dfixed_trunc(priority_mark12) < 0)
			priority_mark12.full = 0;
		if (wm1.priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark_max.full;
		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
		if (rdev->disp_priority == 2)
			d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
	}

	WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
	WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
	WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
	WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}

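/* The MC register file is not directly memory mapped; reads and writes go
 * through the MC_INDEX / MC_DATA indirect register pair. */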
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
	r = RREG32(R_00007C_MC_DATA);
	WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
	return r;
}

void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
		S_000078_MC_IND_WR_EN(1));
	WREG32(R_00007C_MC_DATA, v);
	WREG32(R_000078_MC_INDEX, 0x7F);
}

void rs690_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rs690_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
	/* Program MC, should be a 32bits limited address space */
	WREG32_MC(R_000100_MCCFG_FB_LOCATION,
			S_000100_MC_FB_START(rdev->mc.vram_start >> 16) |
			S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

	rv515_mc_resume(rdev, &save);
}

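/* Bring the ASIC up: program the MC, restart clocks, initialize the pipes,
 * enable the GART and IRQs, then start the CP ring, writeback, the IB pool
 * and audio. */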
static int rs690_startup(struct radeon_device *rdev)
{
	int r;

	rs690_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs690_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs400_gart_enable(rdev);
	if (r)
		return r;
	/* Enable IRQ */
	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = r100_wb_init(rdev);
	if (r)
		dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
	r = r100_ib_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing audio\n");
		return r;
	}

	return 0;
}

int rs690_resume(struct radeon_device *rdev)
{
	/* Make sure the GART is not working */
	rs400_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return rs690_startup(rdev);
}

int rs690_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_disable(rdev);
	r100_wb_disable(rdev);
	rs600_irq_disable(rdev);
	rs400_gart_disable(rdev);
	return 0;
}

void rs690_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_fini(rdev);
	r100_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	rs400_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int rs690_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	rv515_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs690_mc_init(rdev);
	rv515_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs400_gart_init(rdev);
	if (r)
		return r;
	rs600_set_safe_registers(rdev);
	rdev->accel_working = true;
	r = rs690_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		r100_wb_fini(rdev);
		r100_ib_fini(rdev);
		rs400_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}