/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "rv770d.h"
#include "atom.h"
#include "avivod.h"

#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360

static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);

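/**
 * rv770_uvd_calc_post_div - calculate the UPLL post divider
 *
 * @target_freq: clock frequency we want to reach
 * @vco_freq: current VCO frequency
 * @div: resulting post divider value
 *
 * Picks the smallest post divider that keeps the output at or below
 * the requested frequency.  Returns the frequency actually produced,
 * or -1 if the required divider is out of range.
 */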
static int rv770_uvd_calc_post_div(unsigned target_freq,
				   unsigned vco_freq,
				   unsigned *div)
{
	/* Fclk = Fvco / PDIV */
	*div = vco_freq / target_freq;

	/* we always need a frequency less than or equal to the target */
	if ((vco_freq / *div) > target_freq)
		*div += 1;

	/* out of range ? */
	if (*div > 30)
		return -1; /* forget it */

	*div -= 1;
	return vco_freq / (*div + 1);
}

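/**
 * rv770_uvd_send_upll_ctlreq - ask the UPLL to apply new settings
 *
 * @rdev: radeon_device pointer
 *
 * Toggles UPLL_CTLREQ and waits for the hardware to acknowledge the
 * request.  Returns 0 on success or -ETIMEDOUT if the acknowledge
 * bits never assert.
 */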
static int rv770_uvd_send_upll_ctlreq(struct radeon_device *rdev)
{
	unsigned i;

	/* assert UPLL_CTLREQ */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

	/* wait for CTLACK and CTLACK2 to get asserted */
	for (i = 0; i < 100; ++i) {
		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
		if ((RREG32(CG_UPLL_FUNC_CNTL) & mask) == mask)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	/* deassert UPLL_CTLREQ */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);

	return 0;
}

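/**
 * rv770_set_uvd_clocks - program the UVD VCLK and DCLK
 *
 * @rdev: radeon_device pointer
 * @vclk: requested VCLK frequency
 * @dclk: requested DCLK frequency
 *
 * Searches for a VCO frequency and divider combination that gets as
 * close as possible to the requested clocks, then reprograms the UPLL
 * accordingly.  Passing zero for either clock keeps the PLL bypassed
 * and puts it to sleep.  Returns 0 on success, negative error code on
 * failure.
 */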
int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	/* start off with something large */
	int optimal_diff_score = 0x7FFFFFF;
	unsigned optimal_fb_div = 0, optimal_vclk_div = 0;
	unsigned optimal_dclk_div = 0, optimal_vco_freq = 0;
	unsigned vco_freq, vco_min = 50000, vco_max = 160000;
	unsigned ref_freq = rdev->clock.spll.reference_freq;
	int r;

	/* RV740 uses evergreen uvd clk programming */
	if (rdev->family == CHIP_RV740)
		return evergreen_set_uvd_clocks(rdev, vclk, dclk);

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	if (!vclk || !dclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
		return 0;
	}

	/* loop through vco from low to high */
	vco_min = max(max(vco_min, vclk), dclk);
	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 500) {
		uint64_t fb_div = (uint64_t)vco_freq * 43663;
		int calc_clk, diff_score, diff_vclk, diff_dclk;
		unsigned vclk_div, dclk_div;

		do_div(fb_div, ref_freq);
		fb_div |= 1;

		/* fb div out of range ? */
		if (fb_div > 0x03FFFFFF)
			break; /* it can only get worse */

		/* calc vclk with current vco freq. */
		calc_clk = rv770_uvd_calc_post_div(vclk, vco_freq, &vclk_div);
		if (calc_clk == -1)
			break; /* vco is too big, it has to stop. */
		diff_vclk = vclk - calc_clk;

		/* calc dclk with current vco freq. */
		calc_clk = rv770_uvd_calc_post_div(dclk, vco_freq, &dclk_div);
		if (calc_clk == -1)
			break; /* vco is too big, it has to stop. */
		diff_dclk = dclk - calc_clk;

		/* determine if this vco setting is better than current optimal settings */
		diff_score = abs(diff_vclk) + abs(diff_dclk);
		if (diff_score < optimal_diff_score) {
			optimal_fb_div = fb_div;
			optimal_vclk_div = vclk_div;
			optimal_dclk_div = dclk_div;
			optimal_vco_freq = vco_freq;
			optimal_diff_score = diff_score;
			if (optimal_diff_score == 0)
				break; /* it can't get better than this */
		}
	}

	/* set UPLL_FB_DIV to 0x50000 */
	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(0x50000), ~UPLL_FB_DIV_MASK);

	/* deassert UPLL_RESET and UPLL_SLEEP */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~(UPLL_RESET_MASK | UPLL_SLEEP_MASK));

	/* assert BYPASS EN and FB_DIV[0] <- ??? why? */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(1), ~UPLL_FB_DIV(1));

	r = rv770_uvd_send_upll_ctlreq(rdev);
	if (r)
		return r;

	/* assert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* set the required FB_DIV, REF_DIV, Post divider values */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REF_DIV(1), ~UPLL_REF_DIV_MASK);
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_SW_HILEN(optimal_vclk_div >> 1) |
		 UPLL_SW_LOLEN((optimal_vclk_div >> 1) + (optimal_vclk_div & 1)) |
		 UPLL_SW_HILEN2(optimal_dclk_div >> 1) |
		 UPLL_SW_LOLEN2((optimal_dclk_div >> 1) + (optimal_dclk_div & 1)),
		 ~UPLL_SW_MASK);

	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(optimal_fb_div),
		 ~UPLL_FB_DIV_MASK);

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* deassert BYPASS EN and FB_DIV[0] <- ??? why? */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
	WREG32_P(CG_UPLL_FUNC_CNTL_3, 0, ~UPLL_FB_DIV(1));

	r = rv770_uvd_send_upll_ctlreq(rdev);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}

#define PCIE_BUS_CLK	10000
#define TCLK		(PCIE_BUS_CLK / 10)

/**
 * rv770_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r7xx-cayman).
 */
u32 rv770_get_xclk(struct radeon_device *rdev)
{
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 tmp = RREG32(CG_CLKPIN_CNTL);

	if (tmp & MUX_TCLK_TO_XCLK)
		return TCLK;

	if (tmp & XTALIN_DIVIDE)
		return reference_clock / 4;

	return reference_clock;
}

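/**
 * rv770_uvd_resume - set up the UVD block after init/resume
 *
 * @rdev: radeon_device pointer
 *
 * Brings the UVD block back up: resumes the common UVD state, programs
 * the VCPU cache windows for the firmware, stack and heap, and tells
 * the firmware which chip it is running on.  Returns 0 on success,
 * negative error code on failure.
 */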
int rv770_uvd_resume(struct radeon_device *rdev)
{
	uint64_t addr;
	uint32_t chip_id, size;
	int r;

	r = radeon_uvd_resume(rdev);
	if (r)
		return r;

	/* program the VCPU memory controller bits 0-27 */
	addr = rdev->uvd.gpu_addr >> 3;
	size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
	WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(UVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = RADEON_UVD_STACK_SIZE >> 3;
	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(UVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = RADEON_UVD_HEAP_SIZE >> 3;
	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(UVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
	WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
	WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	/* tell firmware which hardware it is running on */
	switch (rdev->family) {
	default:
		return -EINVAL;
	case CHIP_RV710:
		chip_id = 0x01000005;
		break;
	case CHIP_RV730:
		chip_id = 0x01000006;
		break;
	case CHIP_RV740:
		chip_id = 0x01000007;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		chip_id = 0x01000008;
		break;
	case CHIP_JUNIPER:
		chip_id = 0x01000009;
		break;
	case CHIP_REDWOOD:
		chip_id = 0x0100000a;
		break;
	case CHIP_CEDAR:
		chip_id = 0x0100000b;
		break;
	case CHIP_SUMO:
		chip_id = 0x0100000c;
		break;
	case CHIP_SUMO2:
		chip_id = 0x0100000d;
		break;
	case CHIP_PALM:
		chip_id = 0x0100000e;
		break;
	case CHIP_CAYMAN:
		chip_id = 0x0100000f;
		break;
	case CHIP_BARTS:
		chip_id = 0x01000010;
		break;
	case CHIP_TURKS:
		chip_id = 0x01000011;
		break;
	case CHIP_CAICOS:
		chip_id = 0x01000012;
		break;
	case CHIP_TAHITI:
		chip_id = 0x01000014;
		break;
	case CHIP_VERDE:
		chip_id = 0x01000015;
		break;
	case CHIP_PITCAIRN:
		chip_id = 0x01000016;
		break;
	case CHIP_ARUBA:
		chip_id = 0x01000017;
		break;
	}
	WREG32(UVD_VCPU_CHIP_ID, chip_id);

	return 0;
}

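/**
 * rv770_page_flip - program a page flip on the given crtc
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc whose scanout address should be updated
 * @crtc_base: new base address of the frame buffer
 *
 * Locks the graphics update register, writes the new primary and
 * secondary surface addresses, waits for the surface update to become
 * pending and unlocks again.  Returns the current update pending
 * status.
 */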
u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	if (radeon_crtc->crtc_id) {
		WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
		WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
	} else {
		WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
		WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
	}
	WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}

/* get temperature in millidegrees */
int rv770_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp;

	if (temp & 0x400)
		actual_temp = -256;
	else if (temp & 0x200)
		actual_temp = 255;
	else if (temp & 0x100) {
		actual_temp = temp & 0x1ff;
		actual_temp |= ~0x1ff;
	} else
		actual_temp = temp & 0xff;

	return (actual_temp * 1000) / 2;
}

void rv770_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
		}
	}
}

/*
 * GART
 */
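/**
 * rv770_pcie_gart_enable - set up and enable the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Pins the GART page table in VRAM, configures the L2 cache and TLB
 * registers and enables VM context 0 so that system pages can be
 * mapped through the GART.  Returns 0 on success, negative error code
 * on failure.
 */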
static int rv770_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	if (rdev->family == CHIP_RV740)
		WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

static void rv770_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rv770_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}


static void rv770_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

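/**
 * rv770_mc_program - program the memory controller
 *
 * @rdev: radeon_device pointer
 *
 * Stops the memory clients, programs the VRAM and AGP apertures in the
 * memory controller and restarts the clients again.  Also disables the
 * VGA render engine so it cannot overwrite our objects.
 */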
static void rv770_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	/* r7xx hw bug.  Read from HDP_DEBUG1 rather
	 * than writing to HDP_REG_COHERENCY_FLUSH_CNTL
	 */
	tmp = RREG32(HDP_DEBUG1);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

Jerome Glisse3ce0a232009-09-08 10:10:24 +1000598 * CP.
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200599 */
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000600void r700_cp_stop(struct radeon_device *rdev)
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200601{
Dave Airlie53595332011-03-14 09:47:24 +1000602 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000603 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
Alex Deucher724c80e2010-08-27 18:25:25 -0400604 WREG32(SCRATCH_UMSK, 0);
Alex Deucher4d756582012-09-27 15:08:35 -0400605 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200606}
607
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000608static int rv770_cp_load_microcode(struct radeon_device *rdev)
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200609{
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000610 const __be32 *fw_data;
611 int i;
612
613 if (!rdev->me_fw || !rdev->pfp_fw)
614 return -EINVAL;
615
616 r700_cp_stop(rdev);
Cédric Cano4eace7fd2011-02-11 19:45:38 -0500617 WREG32(CP_RB_CNTL,
618#ifdef __BIG_ENDIAN
619 BUF_SWAP_32BIT |
620#endif
621 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000622
623 /* Reset cp */
624 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
625 RREG32(GRBM_SOFT_RESET);
626 mdelay(15);
627 WREG32(GRBM_SOFT_RESET, 0);
628
629 fw_data = (const __be32 *)rdev->pfp_fw->data;
630 WREG32(CP_PFP_UCODE_ADDR, 0);
631 for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
632 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
633 WREG32(CP_PFP_UCODE_ADDR, 0);
634
635 fw_data = (const __be32 *)rdev->me_fw->data;
636 WREG32(CP_ME_RAM_WADDR, 0);
637 for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
638 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
639
640 WREG32(CP_PFP_UCODE_ADDR, 0);
641 WREG32(CP_ME_RAM_WADDR, 0);
642 WREG32(CP_ME_RAM_RADDR, 0);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200643 return 0;
644}
645
Alex Deucherfe251e22010-03-24 13:36:43 -0400646void r700_cp_fini(struct radeon_device *rdev)
647{
Christian König45df6802012-07-06 16:22:55 +0200648 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucherfe251e22010-03-24 13:36:43 -0400649 r700_cp_stop(rdev);
Christian König45df6802012-07-06 16:22:55 +0200650 radeon_ring_fini(rdev, ring);
651 radeon_scratch_free(rdev, ring->rptr_save_reg);
Alex Deucherfe251e22010-03-24 13:36:43 -0400652}
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200653
654/*
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000655 * Core functions
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200656 */
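/**
 * rv770_gpu_init - set up the 3D engine
 *
 * @rdev: radeon_device pointer
 *
 * Determines the chip configuration (pipes, SIMDs, backends), programs
 * the tiling setup and writes the hardware defaults for the 3D engine.
 */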
static void rv770_gpu_init(struct radeon_device *rdev)
{
	int i, j, num_qd_pipes;
	u32 ta_aux_cntl;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 db_debug3;
	u32 num_gs_verts_per_thread;
	u32 vgt_gs_per_es;
	u32 gs_prim_buffer_depth = 0;
	u32 sq_ms_fifo_sizes;
	u32 sq_config;
	u32 sq_thread_resource_mgmt;
	u32 hdp_host_path_cntl;
	u32 sq_dyn_gpr_size_simd_ab_0;
	u32 gb_tiling_config = 0;
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config = 0;
	u32 mc_arb_ramcfg;
	u32 db_debug4, tmp;
	u32 inactive_pipes, shader_pipe_config;
	u32 disabled_rb_mask;
	unsigned active_number;

	/* setup chip specs */
	rdev->config.rv770.tiling_group_size = 256;
	switch (rdev->family) {
	case CHIP_RV770:
		rdev->config.rv770.max_pipes = 4;
		rdev->config.rv770.max_tile_pipes = 8;
		rdev->config.rv770.max_simds = 10;
		rdev->config.rv770.max_backends = 4;
		rdev->config.rv770.max_gprs = 256;
		rdev->config.rv770.max_threads = 248;
		rdev->config.rv770.max_stack_entries = 512;
		rdev->config.rv770.max_hw_contexts = 8;
		rdev->config.rv770.max_gs_threads = 16 * 2;
		rdev->config.rv770.sx_max_export_size = 128;
		rdev->config.rv770.sx_max_export_pos_size = 16;
		rdev->config.rv770.sx_max_export_smx_size = 112;
		rdev->config.rv770.sq_num_cf_insts = 2;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0xF9;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
		break;
	case CHIP_RV730:
		rdev->config.rv770.max_pipes = 2;
		rdev->config.rv770.max_tile_pipes = 4;
		rdev->config.rv770.max_simds = 8;
		rdev->config.rv770.max_backends = 2;
		rdev->config.rv770.max_gprs = 128;
		rdev->config.rv770.max_threads = 248;
		rdev->config.rv770.max_stack_entries = 256;
		rdev->config.rv770.max_hw_contexts = 8;
		rdev->config.rv770.max_gs_threads = 16 * 2;
		rdev->config.rv770.sx_max_export_size = 256;
		rdev->config.rv770.sx_max_export_pos_size = 32;
		rdev->config.rv770.sx_max_export_smx_size = 224;
		rdev->config.rv770.sq_num_cf_insts = 2;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0xf9;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
		if (rdev->config.rv770.sx_max_export_pos_size > 16) {
			rdev->config.rv770.sx_max_export_pos_size -= 16;
			rdev->config.rv770.sx_max_export_smx_size += 16;
		}
		break;
	case CHIP_RV710:
		rdev->config.rv770.max_pipes = 2;
		rdev->config.rv770.max_tile_pipes = 2;
		rdev->config.rv770.max_simds = 2;
		rdev->config.rv770.max_backends = 1;
		rdev->config.rv770.max_gprs = 256;
		rdev->config.rv770.max_threads = 192;
		rdev->config.rv770.max_stack_entries = 256;
		rdev->config.rv770.max_hw_contexts = 4;
		rdev->config.rv770.max_gs_threads = 8 * 2;
		rdev->config.rv770.sx_max_export_size = 128;
		rdev->config.rv770.sx_max_export_pos_size = 16;
		rdev->config.rv770.sx_max_export_smx_size = 112;
		rdev->config.rv770.sq_num_cf_insts = 1;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0x40;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
		break;
	case CHIP_RV740:
		rdev->config.rv770.max_pipes = 4;
		rdev->config.rv770.max_tile_pipes = 4;
		rdev->config.rv770.max_simds = 8;
		rdev->config.rv770.max_backends = 4;
		rdev->config.rv770.max_gprs = 256;
		rdev->config.rv770.max_threads = 248;
		rdev->config.rv770.max_stack_entries = 512;
		rdev->config.rv770.max_hw_contexts = 8;
		rdev->config.rv770.max_gs_threads = 16 * 2;
		rdev->config.rv770.sx_max_export_size = 256;
		rdev->config.rv770.sx_max_export_pos_size = 32;
		rdev->config.rv770.sx_max_export_smx_size = 224;
		rdev->config.rv770.sq_num_cf_insts = 2;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0x100;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;

		if (rdev->config.rv770.sx_max_export_pos_size > 16) {
			rdev->config.rv770.sx_max_export_pos_size -= 16;
			rdev->config.rv770.sx_max_export_smx_size += 16;
		}
		break;
	default:
		break;
	}

	/* Initialize HDP */
	j = 0;
	for (i = 0; i < 32; i++) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
		j += 0x18;
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* setup tiling, simd, pipe config */
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
	inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
	for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
		if (!(inactive_pipes & tmp)) {
			active_number++;
		}
		tmp <<= 1;
	}
	if (active_number == 1) {
		WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
	} else {
		WREG32(SPI_CONFIG_CNTL, 0);
	}

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
	if (tmp < rdev->config.rv770.max_backends) {
		rdev->config.rv770.max_backends = tmp;
	}

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
	if (tmp < rdev->config.rv770.max_pipes) {
		rdev->config.rv770.max_pipes = tmp;
	}
	tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
	if (tmp < rdev->config.rv770.max_simds) {
		rdev->config.rv770.max_simds = tmp;
	}

	switch (rdev->config.rv770.max_tile_pipes) {
	case 1:
	default:
		gb_tiling_config = PIPE_TILING(0);
		break;
	case 2:
		gb_tiling_config = PIPE_TILING(1);
		break;
	case 4:
		gb_tiling_config = PIPE_TILING(2);
		break;
	case 8:
		gb_tiling_config = PIPE_TILING(3);
		break;
	}
	rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;

	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
	tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
					R7XX_MAX_BACKENDS, disabled_rb_mask);
	gb_tiling_config |= tmp << 16;
	rdev->config.rv770.backend_map = tmp;

	if (rdev->family == CHIP_RV770)
		gb_tiling_config |= BANK_TILING(1);
	else {
		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
			gb_tiling_config |= BANK_TILING(1);
		else
			gb_tiling_config |= BANK_TILING(0);
	}
	rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
	gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
		gb_tiling_config |= ROW_TILING(3);
		gb_tiling_config |= SAMPLE_SPLIT(3);
	} else {
		gb_tiling_config |=
			ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
		gb_tiling_config |=
			SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
	}

	gb_tiling_config |= BANK_SWAPS(1);
	rdev->config.rv770.tile_config = gb_tiling_config;

	WREG32(GB_TILING_CONFIG, gb_tiling_config);
	WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
	WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
	WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
	WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
	if (rdev->family == CHIP_RV730) {
		WREG32(UVD_UDEC_DB_TILING_CONFIG, (gb_tiling_config & 0xffff));
		WREG32(UVD_UDEC_DBW_TILING_CONFIG, (gb_tiling_config & 0xffff));
		WREG32(UVD_UDEC_TILING_CONFIG, (gb_tiling_config & 0xffff));
	}

	WREG32(CGTS_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_TCC_DISABLE, 0);


	num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));

	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

	ta_aux_cntl = RREG32(TA_CNTL_AUX);
	WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff);
	smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	if (rdev->family != CHIP_RV740)
		WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
				       GS_FLUSH_CTL(4) |
				       ACK_FLUSH_CTL(3) |
				       SYNC_FLUSH_CTL));

	if (rdev->family != CHIP_RV770)
		WREG32(SMX_SAR_CTL0, 0x00003f3f);

	db_debug3 = RREG32(DB_DEBUG3);
	db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
	switch (rdev->family) {
	case CHIP_RV770:
	case CHIP_RV740:
		db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
		break;
	case CHIP_RV710:
	case CHIP_RV730:
	default:
		db_debug3 |= DB_CLK_OFF_DELAY(2);
		break;
	}
	WREG32(DB_DEBUG3, db_debug3);

	if (rdev->family != CHIP_RV770) {
		db_debug4 = RREG32(DB_DEBUG4);
		db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
		WREG32(DB_DEBUG4, db_debug4);
	}

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));

	WREG32(CP_PERFMON_CNTL, 0);

	sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) |
			    DONE_FIFO_HIWATER(0xe0) |
			    ALU_UPDATE_FIFO_HIWATER(0x8));
	switch (rdev->family) {
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:
		sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
		break;
	case CHIP_RV740:
	default:
		sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
		break;
	}
	WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      EXPORT_SRC_C |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));
	if (rdev->family == CHIP_RV710)
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

	WREG32(SQ_CONFIG, sq_config);

	WREG32(SQ_GPR_RESOURCE_MGMT_1, (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
					NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
					NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2)));

	WREG32(SQ_GPR_RESOURCE_MGMT_2, (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) |
					NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64)));

	sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) |
				   NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) |
				   NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8));
	if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads)
		sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads);
	else
		sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);

	WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
					  NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));

	WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
					  NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));

	sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) |
				     SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) |
				     SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) |
				     SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64));

	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	if (rdev->family == CHIP_RV710)
		WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) |
						AUTO_INVLD_EN(ES_AND_GS_AUTO)));
	else
		WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) |
						AUTO_INVLD_EN(ES_AND_GS_AUTO)));

	switch (rdev->family) {
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV740:
		gs_prim_buffer_depth = 384;
		break;
	case CHIP_RV710:
		gs_prim_buffer_depth = 128;
		break;
	default:
		break;
	}

	num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16;
	vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
	/* Max value for this is 256 */
	if (vgt_gs_per_es > 256)
		vgt_gs_per_es = 256;

	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, vgt_gs_per_es);
	WREG32(VGT_GS_PER_VS, 2);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_EDGERULE, 0xaaaaaaaa);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_CLIPRECT_RULE, 0xffff);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);

	WREG32(TCP_CNTL, 0);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);

	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(VC_ENHANCE, 0);
}

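/**
 * r700_vram_gtt_location - place VRAM and GTT in the MC address space
 *
 * @rdev: radeon_device pointer
 * @mc: memory controller structure holding the sizes to place
 *
 * On AGP boards VRAM is placed next to the AGP aperture on whichever
 * side has more room; otherwise the common radeon VRAM/GTT placement
 * helpers are used.  VRAM is limited if it does not fit in the
 * available space.
 */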
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = mc->mc_mask - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		radeon_vram_location(rdev, &rdev->mc, 0);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}

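/**
 * rv770_mc_init - initialize the memory controller driver state
 *
 * @rdev: radeon_device pointer
 *
 * Reads the VRAM width from the memory controller, determines the
 * aperture and VRAM sizes and sets up the VRAM/GTT layout.  Always
 * returns 0.
 */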
static int rv770_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r700_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}

/**
 * rv770_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU paging using the DMA engine (r7xx).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int rv770_copy_dma(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_gpu_pages,
		   struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	if (radeon_fence_need_sync(*fence, ring->idx)) {
		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
					    ring->idx);
		radeon_fence_note_sync(*fence, ring->idx);
	} else {
		radeon_semaphore_free(rdev, &sem, NULL);
	}

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFF)
			cur_size_in_dw = 0xFFFF;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}

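/**
 * rv770_startup - bring the asic up
 *
 * @rdev: radeon_device pointer
 *
 * Loads firmware if necessary, programs the memory controller and
 * GART/AGP, initializes the GPU, the rings, the interrupt handler,
 * UVD and the IB pool.  Returns 0 on success, negative error code on
 * failure.
 */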
static int rv770_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* enable pcie gen2 link */
	rv770_pcie_gen2_enable(rdev);

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	rv770_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		rv770_agp_enable(rdev);
	} else {
		r = rv770_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	rv770_gpu_init(rdev);
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	r = rv770_uvd_resume(rdev);
	if (!r) {
		r = radeon_fence_driver_start_ring(rdev,
						   R600_RING_TYPE_UVD_INDEX);
		if (r)
			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
	}

	if (r)
		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_RB_RPTR, DMA_RB_WPTR,
			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	r = rv770_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;

	r = r600_dma_resume(rdev);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	if (ring->ring_size) {
		r = radeon_ring_init(rdev, ring, ring->ring_size,
				     R600_WB_UVD_RPTR_OFFSET,
				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
				     0, 0xfffff, RADEON_CP_PACKET2);
		if (!r)
			r = r600_uvd_init(rdev);

		if (r)
			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}

int rv770_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = rv770_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}

Jerome Glisse3ce0a232009-09-08 10:10:24 +10001405int rv770_suspend(struct radeon_device *rdev)
1406{
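	/* Tear down in roughly the reverse order of startup: engines first,
	 * then interrupts, writeback and finally the GART. */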
Rafał Miłecki8a8c6e72010-03-06 13:03:36 +00001407 r600_audio_fini(rdev);
Christian Königf2ba57b2013-04-08 12:41:29 +02001408 radeon_uvd_suspend(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001409 r700_cp_stop(rdev);
Alex Deucher4d756582012-09-27 15:08:35 -04001410 r600_dma_stop(rdev);
Jerome Glisse0c452492010-01-15 14:44:37 +01001411 r600_irq_suspend(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04001412 radeon_wb_disable(rdev);
Jerome Glisse4aac0472009-09-14 18:29:49 +02001413 rv770_pcie_gart_disable(rdev);
Alex Deucher6ddddfe2011-10-14 10:51:22 -04001414
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001415 return 0;
1416}
1417
 1418/* The plan is to move initialization into this function and to use
 1419 * helper functions so that radeon_device_init does pretty much
 1420 * nothing more than call the asic-specific functions. This
 1421 * should also allow removing a bunch of callbacks
 1422 * like vram_info.
 1423 */
1424int rv770_init(struct radeon_device *rdev)
1425{
1426 int r;
1427
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001428 /* Read BIOS */
1429 if (!radeon_get_bios(rdev)) {
1430 if (ASIC_IS_AVIVO(rdev))
1431 return -EINVAL;
1432 }
1433 /* Must be an ATOMBIOS */
Jerome Glissee7d40b92009-10-01 18:02:15 +02001434 if (!rdev->is_atom_bios) {
 1435		dev_err(rdev->dev, "Expecting atombios for RV770 GPU\n");
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001436 return -EINVAL;
Jerome Glissee7d40b92009-10-01 18:02:15 +02001437 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001438 r = radeon_atombios_init(rdev);
1439 if (r)
1440 return r;
1441 /* Post card if necessary */
Alex Deucherfd909c32011-01-11 18:08:59 -05001442 if (!radeon_card_posted(rdev)) {
Dave Airlie72542d72009-12-01 14:06:31 +10001443 if (!rdev->bios) {
1444 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1445 return -EINVAL;
1446 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001447 DRM_INFO("GPU not posted. posting now...\n");
1448 atom_asic_init(rdev->mode_info.atom_context);
1449 }
1450 /* Initialize scratch registers */
1451 r600_scratch_init(rdev);
1452 /* Initialize surface registers */
1453 radeon_surface_init(rdev);
Rafał Miłecki74338742009-11-03 00:53:02 +01001454 /* Initialize clocks */
Michel Dänzer5e6dde72009-09-17 09:42:28 +02001455 radeon_get_clock_info(rdev->ddev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001456 /* Fence driver */
Jerome Glisse30eb77f2011-11-20 20:45:34 +00001457 r = radeon_fence_driver_init(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001458 if (r)
1459 return r;
Jerome Glissed594e462010-02-17 21:54:29 +00001460 /* initialize AGP */
Jerome Glisse700a0cc2010-01-13 15:16:38 +01001461 if (rdev->flags & RADEON_IS_AGP) {
1462 r = radeon_agp_init(rdev);
1463 if (r)
1464 radeon_agp_disable(rdev);
1465 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001466 r = rv770_mc_init(rdev);
Jerome Glisseb574f252009-10-06 19:04:29 +02001467 if (r)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001468 return r;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001469 /* Memory manager */
Jerome Glisse4c788672009-11-20 14:29:23 +01001470 r = radeon_bo_init(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001471 if (r)
1472 return r;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001473
1474 r = radeon_irq_kms_init(rdev);
1475 if (r)
1476 return r;
1477
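	/* Pre-size the rings here; the ring buffers themselves are allocated
	 * later during startup: 1MB GFX, 64KB DMA, 4KB UVD (only if UVD init
	 * succeeded) and 64KB IH. */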
Christian Könige32eb502011-10-23 12:56:27 +02001478 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
1479 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001480
Alex Deucher4d756582012-09-27 15:08:35 -04001481 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
1482 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
1483
Christian Königf2ba57b2013-04-08 12:41:29 +02001484 r = radeon_uvd_init(rdev);
1485 if (!r) {
1486 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
1487 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
1488 4096);
1489 }
1490
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001491 rdev->ih.ring_obj = NULL;
1492 r600_ih_ring_init(rdev, 64 * 1024);
1493
Jerome Glisse4aac0472009-09-14 18:29:49 +02001494 r = r600_pcie_gart_init(rdev);
1495 if (r)
1496 return r;
1497
Alex Deucher779720a2009-12-09 19:31:44 -05001498 rdev->accel_working = true;
Dave Airliefc30b8e2009-09-18 15:19:37 +10001499 r = rv770_startup(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001500 if (r) {
Jerome Glisse655efd32010-02-02 11:51:45 +01001501 dev_err(rdev->dev, "disabling GPU acceleration\n");
Alex Deucherfe251e22010-03-24 13:36:43 -04001502 r700_cp_fini(rdev);
Alex Deucher4d756582012-09-27 15:08:35 -04001503 r600_dma_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01001504 r600_irq_fini(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04001505 radeon_wb_fini(rdev);
Christian König2898c342012-07-05 11:55:34 +02001506 radeon_ib_pool_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01001507 radeon_irq_kms_fini(rdev);
Jerome Glisse75c81292009-10-01 18:02:14 +02001508 rv770_pcie_gart_fini(rdev);
Jerome Glisse733289c2009-09-16 15:24:21 +02001509 rdev->accel_working = false;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001510 }
Rafał Miłecki8a8c6e72010-03-06 13:03:36 +00001511
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001512 return 0;
1513}
1514
1515void rv770_fini(struct radeon_device *rdev)
1516{
1517 r600_blit_fini(rdev);
Alex Deucherfe251e22010-03-24 13:36:43 -04001518 r700_cp_fini(rdev);
Alex Deucher4d756582012-09-27 15:08:35 -04001519 r600_dma_fini(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001520 r600_irq_fini(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04001521 radeon_wb_fini(rdev);
Christian König2898c342012-07-05 11:55:34 +02001522 radeon_ib_pool_fini(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001523 radeon_irq_kms_fini(rdev);
Jerome Glisse4aac0472009-09-14 18:29:49 +02001524 rv770_pcie_gart_fini(rdev);
Christian Königf2ba57b2013-04-08 12:41:29 +02001525 radeon_uvd_fini(rdev);
Alex Deucher16cdf042011-10-28 10:30:02 -04001526 r600_vram_scratch_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001527 radeon_gem_fini(rdev);
1528 radeon_fence_driver_fini(rdev);
Jerome Glissed0269ed2010-01-07 16:08:32 +01001529 radeon_agp_fini(rdev);
Jerome Glisse4c788672009-11-20 14:29:23 +01001530 radeon_bo_fini(rdev);
Jerome Glissee7d40b92009-10-01 18:02:15 +02001531 radeon_atombios_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001532 kfree(rdev->bios);
1533 rdev->bios = NULL;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001534}
Alex Deucher9e46a482011-01-06 18:49:35 -05001535
1536static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
1537{
1538 u32 link_width_cntl, lanes, speed_cntl, tmp;
1539 u16 link_cntl2;
Dave Airlie197bbb32012-06-27 08:35:54 +01001540 u32 mask;
1541 int ret;
Alex Deucher9e46a482011-01-06 18:49:35 -05001542
Alex Deucherd42dd572011-01-12 20:05:11 -05001543 if (radeon_pcie_gen2 == 0)
1544 return;
1545
Alex Deucher9e46a482011-01-06 18:49:35 -05001546 if (rdev->flags & RADEON_IS_IGP)
1547 return;
1548
1549 if (!(rdev->flags & RADEON_IS_PCIE))
1550 return;
1551
1552 /* x2 cards have a special sequence */
1553 if (ASIC_IS_X2(rdev))
1554 return;
1555
Dave Airlie197bbb32012-06-27 08:35:54 +01001556 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
1557 if (ret != 0)
1558 return;
1559
1560 if (!(mask & DRM_PCIE_SPEED_50))
1561 return;
1562
1563 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
1564
Alex Deucher9e46a482011-01-06 18:49:35 -05001565 /* advertise upconfig capability */
Alex Deucher492d2b62012-10-25 16:06:59 -04001566 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
Alex Deucher9e46a482011-01-06 18:49:35 -05001567 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
Alex Deucher492d2b62012-10-25 16:06:59 -04001568 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1569 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
Alex Deucher9e46a482011-01-06 18:49:35 -05001570 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
1571 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
1572 link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
1573 LC_RECONFIG_ARC_MISSING_ESCAPE);
1574 link_width_cntl |= lanes | LC_RECONFIG_NOW |
1575 LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
Alex Deucher492d2b62012-10-25 16:06:59 -04001576 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
Alex Deucher9e46a482011-01-06 18:49:35 -05001577 } else {
1578 link_width_cntl |= LC_UPCONFIGURE_DIS;
Alex Deucher492d2b62012-10-25 16:06:59 -04001579 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
Alex Deucher9e46a482011-01-06 18:49:35 -05001580 }
1581
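	/* Only attempt the switch if the other end of the link has ever
	 * advertised gen2 support. */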
Alex Deucher492d2b62012-10-25 16:06:59 -04001582 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
Alex Deucher9e46a482011-01-06 18:49:35 -05001583 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
1584 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
1585
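		/* Request a 5.0 GT/s target link speed through the mirrored
		 * PCIE config registers (0x4088 is presumably the link
		 * control 2 register; the exact offsets are undocumented here). */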
1586 tmp = RREG32(0x541c);
1587 WREG32(0x541c, tmp | 0x8);
1588 WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
1589 link_cntl2 = RREG16(0x4088);
1590 link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
1591 link_cntl2 |= 0x2;
1592 WREG16(0x4088, link_cntl2);
1593 WREG32(MM_CFGREGS_CNTL, 0);
1594
Alex Deucher492d2b62012-10-25 16:06:59 -04001595 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
Alex Deucher9e46a482011-01-06 18:49:35 -05001596 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
Alex Deucher492d2b62012-10-25 16:06:59 -04001597 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
Alex Deucher9e46a482011-01-06 18:49:35 -05001598
Alex Deucher492d2b62012-10-25 16:06:59 -04001599 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
Alex Deucher9e46a482011-01-06 18:49:35 -05001600 speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
Alex Deucher492d2b62012-10-25 16:06:59 -04001601 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
Alex Deucher9e46a482011-01-06 18:49:35 -05001602
Alex Deucher492d2b62012-10-25 16:06:59 -04001603 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
Alex Deucher9e46a482011-01-06 18:49:35 -05001604 speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
Alex Deucher492d2b62012-10-25 16:06:59 -04001605 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
Alex Deucher9e46a482011-01-06 18:49:35 -05001606
Alex Deucher492d2b62012-10-25 16:06:59 -04001607 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
Alex Deucher9e46a482011-01-06 18:49:35 -05001608 speed_cntl |= LC_GEN2_EN_STRAP;
Alex Deucher492d2b62012-10-25 16:06:59 -04001609 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
Alex Deucher9e46a482011-01-06 18:49:35 -05001610
1611 } else {
Alex Deucher492d2b62012-10-25 16:06:59 -04001612 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
Alex Deucher9e46a482011-01-06 18:49:35 -05001613 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
1614 if (1)
1615 link_width_cntl |= LC_UPCONFIGURE_DIS;
1616 else
1617 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
Alex Deucher492d2b62012-10-25 16:06:59 -04001618 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
Alex Deucher9e46a482011-01-06 18:49:35 -05001619 }
1620}