/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376

static const u32 crtc_offsets[6] =
{
	EVERGREEN_CRTC0_REGISTER_OFFSET,
	EVERGREEN_CRTC1_REGISTER_OFFSET,
	EVERGREEN_CRTC2_REGISTER_OFFSET,
	EVERGREEN_CRTC3_REGISTER_OFFSET,
	EVERGREEN_CRTC4_REGISTER_OFFSET,
	EVERGREEN_CRTC5_REGISTER_OFFSET
};

static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
				     int ring, u32 cp_int_cntl);

void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
			     unsigned *bankh, unsigned *mtaspect,
			     unsigned *tile_split)
{
	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
	switch (*bankw) {
	default:
	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
	}
	switch (*bankh) {
	default:
	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
	}
	switch (*mtaspect) {
	default:
	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
	}
}
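
/* Illustrative decode (values assumed, not taken from any spec): with
 * tiling_flags encoding bankw = 2, bankh = 1 and mtaspect = 2, the switches
 * above return EVERGREEN_ADDR_SURF_BANK_WIDTH_2, _BANK_HEIGHT_1 and
 * _MACRO_TILE_ASPECT_2, i.e. the register-field encodings rather than the
 * raw power-of-two values. */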

static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
			      u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;

	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					   clock, false, &dividers);
	if (r)
		return r;

	WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));

	for (i = 0; i < 100; i++) {
		if (RREG32(status_reg) & DCLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	int r = 0;
	u32 cg_scratch = RREG32(CG_SCRATCH1);

	r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
	if (r)
		goto done;
	cg_scratch &= 0xffff0000;
	cg_scratch |= vclk / 100; /* MHz */

	r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
	if (r)
		goto done;
	cg_scratch &= 0x0000ffff;
	cg_scratch |= (dclk / 100) << 16; /* MHz */

done:
	WREG32(CG_SCRATCH1, cg_scratch);

	return r;
}
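
/* Illustrative call (assumed values; clocks are in 10 kHz units as elsewhere
 * in this driver): sumo_set_uvd_clocks(rdev, 53300, 40000) requests a
 * 533 MHz VCLK and a 400 MHz DCLK, and stashes both (in MHz) in CG_SCRATCH1
 * so the current UVD clocks can be read back later. */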

static int evergreen_uvd_calc_post_div(unsigned target_freq,
				       unsigned vco_freq,
				       unsigned *div)
{
	/* target larger than vco frequency ? */
	if (vco_freq < target_freq)
		return -1; /* forget it */

	/* Fclk = Fvco / PDIV */
	*div = vco_freq / target_freq;

	/* we always need a frequency less than or equal to the target */
	if ((vco_freq / *div) > target_freq)
		*div += 1;

	/* dividers above 5 must be even */
	if (*div > 5 && *div % 2)
		*div += 1;

	/* out of range ? */
	if (*div >= 128)
		return -1; /* forget it */

	return vco_freq / *div;
}
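
/* Worked example (illustrative numbers, 10 kHz units): for a 400 MHz target
 * (40000) and a 1.6 GHz VCO (160000), PDIV = 160000 / 40000 = 4, which is in
 * range and needs no evenness fixup, so the function returns 40000 exactly.
 * When the division overshoots the target, the divider is bumped up so the
 * resulting clock never exceeds what was asked for. */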

static int evergreen_uvd_send_upll_ctlreq(struct radeon_device *rdev)
{
	unsigned i;

	/* assert UPLL_CTLREQ */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

	/* wait for CTLACK and CTLACK2 to get asserted */
	for (i = 0; i < 100; ++i) {
		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
		if ((RREG32(CG_UPLL_FUNC_CNTL) & mask) == mask)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	/* deassert UPLL_CTLREQ */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);

	return 0;
}
int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	/* start off with something large */
	int optimal_diff_score = 0x7FFFFFF;
	unsigned optimal_fb_div = 0, optimal_vclk_div = 0;
	unsigned optimal_dclk_div = 0, optimal_vco_freq = 0;
	unsigned vco_freq;
	int r;

	/* loop through vco from low to high */
	for (vco_freq = 125000; vco_freq <= 250000; vco_freq += 100) {
		unsigned fb_div = vco_freq / rdev->clock.spll.reference_freq * 16384;
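		/* note for clarity (inferred from the UPLL_FB_DIV write
		 * below): fb_div carries a fixed-point feedback divider,
		 * scaled by 16384 (2^14) so the fractional part of
		 * vco_freq / reference_freq is preserved */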
		int calc_clk, diff_score, diff_vclk, diff_dclk;
		unsigned vclk_div, dclk_div;

		/* fb div out of range ? */
		if (fb_div > 0x03FFFFFF)
			break; /* it can only get worse */

		/* calc vclk with current vco freq. */
		calc_clk = evergreen_uvd_calc_post_div(vclk, vco_freq, &vclk_div);
		if (calc_clk == -1)
			break; /* vco is too big, it has to stop. */
		diff_vclk = vclk - calc_clk;

		/* calc dclk with current vco freq. */
		calc_clk = evergreen_uvd_calc_post_div(dclk, vco_freq, &dclk_div);
		if (calc_clk == -1)
			break; /* vco is too big, it has to stop. */
		diff_dclk = dclk - calc_clk;

		/* determine if this vco setting is better than current optimal settings */
		diff_score = abs(diff_vclk) + abs(diff_dclk);
		if (diff_score < optimal_diff_score) {
			optimal_fb_div = fb_div;
			optimal_vclk_div = vclk_div;
			optimal_dclk_div = dclk_div;
			optimal_vco_freq = vco_freq;
			optimal_diff_score = diff_score;
			if (optimal_diff_score == 0)
				break; /* it can't get better than this */
		}
	}

	/* set VCO_MODE to 1 */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);

	/* toggle UPLL_SLEEP to 1 then back to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);

	/* deassert UPLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(1);

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* put PLL in bypass mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);

	r = evergreen_uvd_send_upll_ctlreq(rdev);
	if (r)
		return r;

	/* assert UPLL_RESET again */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* disable spread spectrum. */
	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);

	/* set feedback divider */
	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(optimal_fb_div), ~UPLL_FB_DIV_MASK);

	/* set ref divider to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);

	if (optimal_vco_freq < 187500)
		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
	else
		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);

	/* set PDIV_A and PDIV_B */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_PDIV_A(optimal_vclk_div) | UPLL_PDIV_B(optimal_dclk_div),
		 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* switch from bypass mode to normal mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	r = evergreen_uvd_send_upll_ctlreq(rdev);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}
297
Alex Deucherd054ac12011-09-01 17:46:15 +0000298void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
299{
300 u16 ctl, v;
Jiang Liu32195ae2012-07-24 17:20:30 +0800301 int err;
Alex Deucherd054ac12011-09-01 17:46:15 +0000302
Jiang Liu32195ae2012-07-24 17:20:30 +0800303 err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
Alex Deucherd054ac12011-09-01 17:46:15 +0000304 if (err)
305 return;
306
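	/* note (added for clarity): the READRQ field encodes the request
	 * size as 128 << v bytes, so v == 2 below selects the 512 byte
	 * default */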
	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;

	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
	 * to avoid hangs or performance issues
	 */
	if ((v == 0) || (v == 6) || (v == 7)) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= (2 << 12);
		pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
	}
}

static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
{
	if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
		return true;
	else
		return false;
}

static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}
339
Alex Deucher377edc82012-07-17 14:02:42 -0400340/**
341 * dce4_wait_for_vblank - vblank wait asic callback.
342 *
343 * @rdev: radeon_device pointer
344 * @crtc: crtc to wait for vblank on
345 *
346 * Wait for vblank on the requested crtc (evergreen+).
347 */
Alex Deucher3ae19b72012-02-23 17:53:37 -0500348void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
349{
Alex Deucher10257a62013-04-09 18:49:59 -0400350 unsigned i = 0;
Alex Deucher3ae19b72012-02-23 17:53:37 -0500351
Alex Deucher4a159032012-08-15 17:13:53 -0400352 if (crtc >= rdev->num_crtc)
353 return;
354
Alex Deucher10257a62013-04-09 18:49:59 -0400355 if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
356 return;
357
358 /* depending on when we hit vblank, we may be close to active; if so,
359 * wait for another frame.
360 */
361 while (dce4_is_in_vblank(rdev, crtc)) {
362 if (i++ % 100 == 0) {
363 if (!dce4_is_counter_moving(rdev, crtc))
Alex Deucher3ae19b72012-02-23 17:53:37 -0500364 break;
Alex Deucher3ae19b72012-02-23 17:53:37 -0500365 }
Alex Deucher10257a62013-04-09 18:49:59 -0400366 }
367
368 while (!dce4_is_in_vblank(rdev, crtc)) {
369 if (i++ % 100 == 0) {
370 if (!dce4_is_counter_moving(rdev, crtc))
Alex Deucher3ae19b72012-02-23 17:53:37 -0500371 break;
Alex Deucher3ae19b72012-02-23 17:53:37 -0500372 }
373 }
374}
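
/* Note on the two loops above: the first waits out any vblank we may
 * already be in, the second waits for the next one, so callers always see a
 * fresh vblank edge; the counter-moving check bails out if the display is
 * not actually scanning (e.g. it was disabled mid-wait). */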

/**
 * evergreen_pre_page_flip - pre-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to prepare for pageflip on
 *
 * Pre-pageflip callback (evergreen+).
 * Enables the pageflip irq (vblank irq).
 */
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

/**
 * evergreen_post_page_flip - post-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to cleanup pageflip on
 *
 * Post-pageflip callback (evergreen+).
 * Disables the pageflip irq (vblank irq).
 */
void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

/**
 * evergreen_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to flip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (evergreen+).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 * Returns the current update pending status.
 */
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}

/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp, toffset;
	int actual_temp = 0;

	if (rdev->family == CHIP_JUNIPER) {
		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
			TOFFSET_SHIFT;
		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
			TS0_ADC_DOUT_SHIFT;

		if (toffset & 0x100)
			actual_temp = temp / 2 - (0x200 - toffset);
		else
			actual_temp = temp / 2 + toffset;

		actual_temp = actual_temp * 1000;

	} else {
		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
			ASIC_T_SHIFT;

		if (temp & 0x400)
			actual_temp = -256;
		else if (temp & 0x200)
			actual_temp = 255;
		else if (temp & 0x100) {
			actual_temp = temp & 0x1ff;
			actual_temp |= ~0x1ff;
		} else
			actual_temp = temp & 0xff;

		actual_temp = (actual_temp * 1000) / 2;
	}

	return actual_temp;
}
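
/* Illustrative decode (assumed register values): on Juniper, with
 * toffset = 0x1f8 (sign bit 0x100 set) and a raw ADC reading of 0x80,
 * actual_temp = 0x80/2 - (0x200 - 0x1f8) = 56, i.e. 56000 millidegrees
 * after the x1000 scaling. */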

int sumo_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
	int actual_temp = temp - 49;

	return actual_temp * 1000;
}

/**
 * sumo_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (sumo, trinity, SI).
 * Used for profile mode only.
 */
void sumo_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	/* low,mid sh/mh */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);

	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

	/* high sh/mh */
	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;

	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;
}

/**
 * btc_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (BTC, cayman).
 * Used for profile mode only.
 */
void btc_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
	/* starting with BTC, there is one state that is used for both
	 * MH and SH.  Difference is that we always use the high clock index for
	 * mclk.
	 */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
}

/**
 * evergreen_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, etc.) (evergreen+).
 */
void evergreen_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if (voltage->type == VOLTAGE_SW) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
		}

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  Difference is that we always use the high clock index for
		 * mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			voltage = &rdev->pm.power_state[req_ps_idx].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;

		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->vddci == 0xff01)
			return;
		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
			rdev->pm.current_vddci = voltage->vddci;
			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
		}
	}
}

/**
 * evergreen_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (evergreen+).
 */
void evergreen_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/**
 * evergreen_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (evergreen+).
 */
void evergreen_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/**
 * evergreen_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

/**
 * evergreen_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = evergreen_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

/**
 * evergreen_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void evergreen_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enabled = 0;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS; this avoids
			 * breaking the aux dp channel on iMacs and helps (but
			 * does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also avoids interrupt storms during dpms.
			 */
			continue;
		}
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
		enabled |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_enable_hpd(rdev, enabled);
}

/**
 * evergreen_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void evergreen_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disabled = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			break;
		default:
			break;
		}
		disabled |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disabled);
}

/* watermark setup */

static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
					struct radeon_crtc *radeon_crtc,
					struct drm_display_mode *mode,
					struct drm_display_mode *other_mode)
{
	u32 tmp;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 2:0:
	 * first display controller
	 *  0 - first half of lb (3840 * 2)
	 *  1 - first 3/4 of lb (5760 * 2)
	 *  2 - whole lb (7680 * 2), other crtc must be disabled
	 *  3 - first 1/4 of lb (1920 * 2)
	 * second display controller
	 *  4 - second half of lb (3840 * 2)
	 *  5 - second 3/4 of lb (5760 * 2)
	 *  6 - whole lb (7680 * 2), other crtc must be disabled
	 *  7 - last 1/4 of lb (1920 * 2)
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs.  Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (radeon_crtc->base.enabled && mode) {
		if (other_mode)
			tmp = 0; /* 1/2 */
		else
			tmp = 2; /* whole */
	} else
		tmp = 0;

	/* second controller of the pair uses second half of the lb */
	if (radeon_crtc->crtc_id % 2)
		tmp += 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);

	if (radeon_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		case 4:
		default:
			if (ASIC_IS_DCE5(rdev))
				return 4096 * 2;
			else
				return 3840 * 2;
		case 1:
		case 5:
			if (ASIC_IS_DCE5(rdev))
				return 6144 * 2;
			else
				return 5760 * 2;
		case 2:
		case 6:
			if (ASIC_IS_DCE5(rdev))
				return 8192 * 2;
			else
				return 7680 * 2;
		case 3:
		case 7:
			if (ASIC_IS_DCE5(rdev))
				return 2048 * 2;
			else
				return 1920 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
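
/* Illustrative outcomes (derived from the switch above): a single enabled
 * crtc in a pair gets the whole line buffer (8192 * 2 entries on DCE5,
 * 7680 * 2 otherwise), while two enabled crtcs in a pair each get half
 * (4096 * 2 on DCE5, 3840 * 2 otherwise). */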

u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	}
}

struct evergreen_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
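
/* Worked example (assumed numbers): with 2 dram channels (2 * 4 bytes wide)
 * and yclk = 1000000 kHz (1 GHz effective), the peak is 8 bytes * 1000 MHz
 * = 8000 MB/s, and the 0.7 efficiency factor above yields 5600 MB/s from
 * evergreen_dram_bandwidth(). */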

static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, disp_clk);
	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the Available bandwidth.  Display can use this temporarily but not in average. */
	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = evergreen_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}
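
/* Rough intuition (illustrative, not from any spec): the returned watermark
 * is the time in ns the display pipe must be able to ride out; it is
 * dominated by mc_latency + dc_latency unless the line buffer cannot be
 * refilled within the active portion of a scanline, in which case the
 * shortfall is added on top. */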

static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (evergreen_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

static void evergreen_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct evergreen_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 pipe_offset = radeon_crtc->crtc_id * 16;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !evergreen_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);

}

/**
 * evergreen_bandwidth_update - update display watermarks callback.
 *
 * @rdev: radeon_device pointer
 *
 * Update the display watermarks based on the requested mode(s)
 * (evergreen+).
 */
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i += 2) {
		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

/**
 * evergreen_mc_wait_for_idle - wait for MC idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Wait for the MC (memory controller) to be idle.
 * (evergreen+).
 * Returns 0 if the MC is idle, -1 if not.
 */
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(SRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

/*
 * GART
 */
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

Lauri Kasanen1109ca02012-08-31 13:43:50 -04001440static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001441{
1442 u32 tmp;
Alex Deucher0fcdb612010-03-24 13:20:41 -04001443 int r;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001444
Jerome Glissec9a1be92011-11-03 11:16:49 -04001445 if (rdev->gart.robj == NULL) {
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001446 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
1447 return -EINVAL;
1448 }
1449 r = radeon_gart_table_vram_pin(rdev);
1450 if (r)
1451 return r;
Dave Airlie82568562010-02-05 16:00:07 +10001452 radeon_gart_restore(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001453 /* Setup L2 cache */
1454 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1455 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1456 EFFECTIVE_L2_QUEUE_SIZE(7));
1457 WREG32(VM_L2_CNTL2, 0);
1458 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
1459 /* Setup TLB control */
1460 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1461 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1462 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
1463 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
Alex Deucher8aeb96f2011-05-03 19:28:02 -04001464 if (rdev->flags & RADEON_IS_IGP) {
1465 WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
1466 WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
1467 WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
1468 } else {
1469 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
1470 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
1471 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
Alex Deucher0b8c30b2012-05-31 18:54:43 -04001472 if ((rdev->family == CHIP_JUNIPER) ||
1473 (rdev->family == CHIP_CYPRESS) ||
1474 (rdev->family == CHIP_HEMLOCK) ||
1475 (rdev->family == CHIP_BARTS))
1476 WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
Alex Deucher8aeb96f2011-05-03 19:28:02 -04001477 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001478 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
1479 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
1480 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
1481 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
1482 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1483 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
1484 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
1485 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
1486 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
1487 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
1488 (u32)(rdev->dummy_page.addr >> 12));
Alex Deucher0fcdb612010-03-24 13:20:41 -04001489 WREG32(VM_CONTEXT1_CNTL, 0);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001490
Alex Deucher0fcdb612010-03-24 13:20:41 -04001491 evergreen_pcie_gart_tlb_flush(rdev);
Tormod Voldenfcf4de52011-08-31 21:54:07 +00001492 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1493 (unsigned)(rdev->mc.gtt_size >> 20),
1494 (unsigned long long)rdev->gart.table_addr);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001495 rdev->gart.ready = true;
1496 return 0;
1497}
1498
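/**
 * evergreen_pcie_gart_disable - disable the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Disables both VM contexts, returns the L2 cache and TLB controls to
 * their default state and unpins the GART page table.
 */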
Lauri Kasanen1109ca02012-08-31 13:43:50 -04001499static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001500{
1501 u32 tmp;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001502
1503 /* Disable all tables */
Alex Deucher0fcdb612010-03-24 13:20:41 -04001504 WREG32(VM_CONTEXT0_CNTL, 0);
1505 WREG32(VM_CONTEXT1_CNTL, 0);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001506
1507 /* Setup L2 cache */
1508 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
1509 EFFECTIVE_L2_QUEUE_SIZE(7));
1510 WREG32(VM_L2_CNTL2, 0);
1511 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
1512 /* Setup TLB control */
1513 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
1514 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
1515 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
1516 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
1517 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
1518 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
1519 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
1520 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
Jerome Glissec9a1be92011-11-03 11:16:49 -04001521 radeon_gart_table_vram_unpin(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001522}
1523
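/**
 * evergreen_pcie_gart_fini - tear down the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Disables the GART, then frees the page table and the GART structures.
 */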
Lauri Kasanen1109ca02012-08-31 13:43:50 -04001524static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001525{
1526 evergreen_pcie_gart_disable(rdev);
1527 radeon_gart_table_vram_free(rdev);
1528 radeon_gart_fini(rdev);
1529}
1530
1531
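/**
 * evergreen_agp_enable - configure the MC for AGP operation
 *
 * @rdev: radeon_device pointer
 *
 * Programs the L2 cache and L1 TLB controls and disables both VM
 * contexts, so GPU accesses go straight to the aperture without GART
 * page table translation.
 */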
Lauri Kasanen1109ca02012-08-31 13:43:50 -04001532static void evergreen_agp_enable(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001533{
1534 u32 tmp;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001535
1536 /* Setup L2 cache */
1537 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1538 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1539 EFFECTIVE_L2_QUEUE_SIZE(7));
1540 WREG32(VM_L2_CNTL2, 0);
1541 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
1542 /* Setup TLB control */
1543 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1544 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1545 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
1546 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
1547 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
1548 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
1549 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
1550 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
1551 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
1552 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
1553 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
Alex Deucher0fcdb612010-03-24 13:20:41 -04001554 WREG32(VM_CONTEXT0_CNTL, 0);
1555 WREG32(VM_CONTEXT1_CNTL, 0);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001556}
1557
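/**
 * evergreen_mc_stop - stop MC (memory controller) clients
 *
 * @rdev: radeon_device pointer
 * @save: evergreen_mc_save struct that receives the current display state
 *
 * Disables VGA rendering, blanks all enabled CRTCs, waits for the MC to
 * idle, blacks out the MC and locks the double buffered display registers
 * so the framebuffer location can be changed safely.
 */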
Alex Deucherb9952a82011-03-02 20:07:33 -05001558void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001559{
Alex Deucher62444b72012-08-15 17:18:42 -04001560 u32 crtc_enabled, tmp, frame_count, blackout;
1561 int i, j;
1562
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001563 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
1564 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001565
Alex Deucher62444b72012-08-15 17:18:42 -04001566 /* disable VGA render */
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001567 WREG32(VGA_RENDER_CONTROL, 0);
Alex Deucher62444b72012-08-15 17:18:42 -04001568 /* blank the display controllers */
1569 for (i = 0; i < rdev->num_crtc; i++) {
1570 crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
1571 if (crtc_enabled) {
1572 save->crtc_enabled[i] = true;
1573 if (ASIC_IS_DCE6(rdev)) {
1574 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
1575 if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
1576 radeon_wait_for_vblank(rdev, i);
Alex Deucherabf14572013-04-10 19:08:14 -04001577 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
Alex Deucher62444b72012-08-15 17:18:42 -04001578 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
1579 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
1580 }
1581 } else {
1582 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1583 if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
1584 radeon_wait_for_vblank(rdev, i);
Alex Deucherabf14572013-04-10 19:08:14 -04001585 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
Alex Deucher62444b72012-08-15 17:18:42 -04001586 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1587 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
Alex Deucherabf14572013-04-10 19:08:14 -04001588 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
Alex Deucher62444b72012-08-15 17:18:42 -04001589 }
1590 }
1591 /* wait for the next frame */
1592 frame_count = radeon_get_vblank_counter(rdev, i);
1593 for (j = 0; j < rdev->usec_timeout; j++) {
1594 if (radeon_get_vblank_counter(rdev, i) != frame_count)
1595 break;
1596 udelay(1);
1597 }
Alex Deucherabf14572013-04-10 19:08:14 -04001598
1599 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
1600 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
1601 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1602 tmp &= ~EVERGREEN_CRTC_MASTER_EN;
1603 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
1604 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
1605 save->crtc_enabled[i] = false;
1606 /* ***** */
Alex Deucher804cc4a2012-11-19 09:11:27 -05001607 } else {
1608 save->crtc_enabled[i] = false;
Alex Deucher62444b72012-08-15 17:18:42 -04001609 }
Alex Deucher18007402010-11-22 17:56:28 -05001610 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001611
Alex Deucher62444b72012-08-15 17:18:42 -04001612 radeon_mc_wait_for_idle(rdev);
1613
1614 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
1615 if ((blackout & BLACKOUT_MODE_MASK) != 1) {
1616 /* Block CPU access */
1617 WREG32(BIF_FB_EN, 0);
1618 /* blackout the MC */
1619 blackout &= ~BLACKOUT_MODE_MASK;
1620 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
Alex Deucherb7eff392011-07-08 11:44:56 -04001621 }
Alex Deuchered39fad2013-01-31 09:00:52 -05001622 /* wait for the MC to settle */
1623 udelay(100);
Alex Deucher968c0162013-04-10 09:58:42 -04001624
1625 /* lock double buffered regs */
1626 for (i = 0; i < rdev->num_crtc; i++) {
1627 if (save->crtc_enabled[i]) {
1628 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
1629 if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
1630 tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
1631 WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
1632 }
1633 tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
1634 if (!(tmp & 1)) {
1635 tmp |= 1;
1636 WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
1637 }
1638 }
1639 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001640}
1641
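/**
 * evergreen_mc_resume - restart MC (memory controller) clients
 *
 * @rdev: radeon_device pointer
 * @save: state previously saved by evergreen_mc_stop()
 *
 * Points the CRTC surfaces and the VGA aperture at the new VRAM base,
 * unlocks the double buffered display registers, lifts the MC blackout,
 * re-enables CPU access through the BIF and unblanks the displays.
 */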
Alex Deucherb9952a82011-03-02 20:07:33 -05001642void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001643{
Alex Deucher62444b72012-08-15 17:18:42 -04001644 u32 tmp, frame_count;
1645 int i, j;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001646
Alex Deucher62444b72012-08-15 17:18:42 -04001647 /* update crtc base addresses */
1648 for (i = 0; i < rdev->num_crtc; i++) {
1649 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
Alex Deucher18007402010-11-22 17:56:28 -05001650 upper_32_bits(rdev->mc.vram_start));
Alex Deucher62444b72012-08-15 17:18:42 -04001651 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
Alex Deucher18007402010-11-22 17:56:28 -05001652 upper_32_bits(rdev->mc.vram_start));
Alex Deucher62444b72012-08-15 17:18:42 -04001653 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
Alex Deucher18007402010-11-22 17:56:28 -05001654 (u32)rdev->mc.vram_start);
Alex Deucher62444b72012-08-15 17:18:42 -04001655 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
Alex Deucher18007402010-11-22 17:56:28 -05001656 (u32)rdev->mc.vram_start);
Alex Deucherb7eff392011-07-08 11:44:56 -04001657 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001658 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
1659 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
Alex Deucher62444b72012-08-15 17:18:42 -04001660
Alex Deucher968c0162013-04-10 09:58:42 -04001661 /* unlock regs and wait for update */
1662 for (i = 0; i < rdev->num_crtc; i++) {
1663 if (save->crtc_enabled[i]) {
1664 tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
1665 if ((tmp & 0x3) != 0) {
1666 tmp &= ~0x3;
1667 WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
1668 }
1669 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
1670 if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
1671 tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
1672 WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
1673 }
1674 tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
1675 if (tmp & 1) {
1676 tmp &= ~1;
1677 WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
1678 }
1679 for (j = 0; j < rdev->usec_timeout; j++) {
1680 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
1681 if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
1682 break;
1683 udelay(1);
1684 }
1685 }
1686 }
1687
Alex Deucher62444b72012-08-15 17:18:42 -04001688 /* unblackout the MC */
1689 tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
1690 tmp &= ~BLACKOUT_MODE_MASK;
1691 WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
1692 /* allow CPU access */
1693 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
1694
1695 for (i = 0; i < rdev->num_crtc; i++) {
Alex Deucher695ddeb2012-11-05 16:34:58 +00001696 if (save->crtc_enabled[i]) {
Alex Deucher62444b72012-08-15 17:18:42 -04001697 if (ASIC_IS_DCE6(rdev)) {
1698 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
1699 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
Christopher Staitebb5888202013-01-26 11:10:58 -05001700 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
Alex Deucher62444b72012-08-15 17:18:42 -04001701 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
Christopher Staitebb5888202013-01-26 11:10:58 -05001702 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
Alex Deucher62444b72012-08-15 17:18:42 -04001703 } else {
1704 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1705 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
Christopher Staitebb5888202013-01-26 11:10:58 -05001706 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
Alex Deucher62444b72012-08-15 17:18:42 -04001707 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
Christopher Staitebb5888202013-01-26 11:10:58 -05001708 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
Alex Deucher62444b72012-08-15 17:18:42 -04001709 }
1710 /* wait for the next frame */
1711 frame_count = radeon_get_vblank_counter(rdev, i);
1712 for (j = 0; j < rdev->usec_timeout; j++) {
1713 if (radeon_get_vblank_counter(rdev, i) != frame_count)
1714 break;
1715 udelay(1);
1716 }
1717 }
1718 }
1719 /* Unlock vga access */
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001720 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
1721 mdelay(1);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001722 WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
1723}
1724
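/**
 * evergreen_mc_program - program the GPU memory controller
 *
 * @rdev: radeon_device pointer
 *
 * Programs the VRAM, GTT and AGP apertures in the MC address space.
 * Display access is halted around the update via evergreen_mc_stop()
 * and evergreen_mc_resume().
 */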
Alex Deucher755d8192011-03-02 20:07:34 -05001725void evergreen_mc_program(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001726{
1727 struct evergreen_mc_save save;
1728 u32 tmp;
1729 int i, j;
1730
1731 /* Initialize HDP */
1732 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1733 WREG32((0x2c14 + j), 0x00000000);
1734 WREG32((0x2c18 + j), 0x00000000);
1735 WREG32((0x2c1c + j), 0x00000000);
1736 WREG32((0x2c20 + j), 0x00000000);
1737 WREG32((0x2c24 + j), 0x00000000);
1738 }
1739 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1740
1741 evergreen_mc_stop(rdev, &save);
1742 if (evergreen_mc_wait_for_idle(rdev)) {
 1743 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1744 }
 1745 /* Lockout access through VGA aperture */
1746 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1747 /* Update configuration */
1748 if (rdev->flags & RADEON_IS_AGP) {
1749 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1750 /* VRAM before AGP */
1751 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1752 rdev->mc.vram_start >> 12);
1753 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1754 rdev->mc.gtt_end >> 12);
1755 } else {
1756 /* VRAM after AGP */
1757 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1758 rdev->mc.gtt_start >> 12);
1759 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1760 rdev->mc.vram_end >> 12);
1761 }
1762 } else {
1763 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1764 rdev->mc.vram_start >> 12);
1765 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1766 rdev->mc.vram_end >> 12);
1767 }
Alex Deucher3b9832f2011-11-10 08:59:39 -05001768 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
Alex Deucher05b3ef62012-03-20 17:18:37 -04001769 /* llano/ontario only */
1770 if ((rdev->family == CHIP_PALM) ||
1771 (rdev->family == CHIP_SUMO) ||
1772 (rdev->family == CHIP_SUMO2)) {
Alex Deucherb4183e32010-12-15 11:04:10 -05001773 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
1774 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
1775 tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
1776 WREG32(MC_FUS_VM_FB_OFFSET, tmp);
1777 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001778 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1779 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1780 WREG32(MC_VM_FB_LOCATION, tmp);
1781 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
Alex Deucherc46cb4d2011-01-06 19:12:37 -05001782 WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
Jerome Glisse46fcd2b2010-06-03 19:34:48 +02001783 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001784 if (rdev->flags & RADEON_IS_AGP) {
1785 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
1786 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
1787 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1788 } else {
1789 WREG32(MC_VM_AGP_BASE, 0);
1790 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1791 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1792 }
1793 if (evergreen_mc_wait_for_idle(rdev)) {
 1794 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1795 }
1796 evergreen_mc_resume(rdev, &save);
1797 /* we need to own VRAM, so turn off the VGA renderer here
1798 * to stop it overwriting our objects */
1799 rv515_vga_render_disable(rdev);
1800}
1801
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001802/*
1803 * CP.
1804 */
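/**
 * evergreen_ring_ib_execute - schedule an IB on the ring
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to schedule
 *
 * Emits an INDIRECT_BUFFER packet, preceded by a next_rptr update when
 * the rptr save register or writeback is in use, so the CP fetches and
 * executes the IB.
 */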
Alex Deucher12920592011-02-02 12:37:40 -05001805void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1806{
Christian König876dc9f2012-05-08 14:24:01 +02001807 struct radeon_ring *ring = &rdev->ring[ib->ring];
Alex Deucher89d35802012-07-17 14:02:31 -04001808 u32 next_rptr;
Christian König7b1f2482011-09-23 15:11:23 +02001809
Alex Deucher12920592011-02-02 12:37:40 -05001810 /* set to DX10/11 mode */
Christian Könige32eb502011-10-23 12:56:27 +02001811 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
1812 radeon_ring_write(ring, 1);
Christian König45df6802012-07-06 16:22:55 +02001813
1814 if (ring->rptr_save_reg) {
Alex Deucher89d35802012-07-17 14:02:31 -04001815 next_rptr = ring->wptr + 3 + 4;
Christian König45df6802012-07-06 16:22:55 +02001816 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1817 radeon_ring_write(ring, ((ring->rptr_save_reg -
1818 PACKET3_SET_CONFIG_REG_START) >> 2));
1819 radeon_ring_write(ring, next_rptr);
Alex Deucher89d35802012-07-17 14:02:31 -04001820 } else if (rdev->wb.enabled) {
1821 next_rptr = ring->wptr + 5 + 4;
1822 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
1823 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
1824 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
1825 radeon_ring_write(ring, next_rptr);
1826 radeon_ring_write(ring, 0);
Christian König45df6802012-07-06 16:22:55 +02001827 }
1828
Christian Könige32eb502011-10-23 12:56:27 +02001829 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1830 radeon_ring_write(ring,
Alex Deucher0f234f5f2011-02-13 19:06:33 -05001831#ifdef __BIG_ENDIAN
1832 (2 << 0) |
1833#endif
1834 (ib->gpu_addr & 0xFFFFFFFC));
Christian Könige32eb502011-10-23 12:56:27 +02001835 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
1836 radeon_ring_write(ring, ib->length_dw);
Alex Deucher12920592011-02-02 12:37:40 -05001837}
1838
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001839
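/**
 * evergreen_cp_load_microcode - load the CP (command processor) microcode
 *
 * @rdev: radeon_device pointer
 *
 * Stops the CP and writes the PFP and ME ucode images into the CP
 * internal RAMs. Returns 0 on success, -EINVAL if the firmware has not
 * been loaded.
 */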
1840static int evergreen_cp_load_microcode(struct radeon_device *rdev)
1841{
Alex Deucherfe251e22010-03-24 13:36:43 -04001842 const __be32 *fw_data;
1843 int i;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001844
Alex Deucherfe251e22010-03-24 13:36:43 -04001845 if (!rdev->me_fw || !rdev->pfp_fw)
1846 return -EINVAL;
1847
1848 r700_cp_stop(rdev);
Alex Deucher0f234f5f2011-02-13 19:06:33 -05001849 WREG32(CP_RB_CNTL,
1850#ifdef __BIG_ENDIAN
1851 BUF_SWAP_32BIT |
1852#endif
1853 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
Alex Deucherfe251e22010-03-24 13:36:43 -04001854
1855 fw_data = (const __be32 *)rdev->pfp_fw->data;
1856 WREG32(CP_PFP_UCODE_ADDR, 0);
1857 for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
1858 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
1859 WREG32(CP_PFP_UCODE_ADDR, 0);
1860
1861 fw_data = (const __be32 *)rdev->me_fw->data;
1862 WREG32(CP_ME_RAM_WADDR, 0);
1863 for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
1864 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
1865
1866 WREG32(CP_PFP_UCODE_ADDR, 0);
1867 WREG32(CP_ME_RAM_WADDR, 0);
1868 WREG32(CP_ME_RAM_RADDR, 0);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001869 return 0;
1870}
1871
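/**
 * evergreen_cp_start - initialize the CP rendering state
 *
 * @rdev: radeon_device pointer
 *
 * Emits ME_INITIALIZE, un-halts the ME and sends the default clear state
 * and register preamble on the gfx ring. Returns 0 on success, negative
 * error code if the ring could not be locked.
 */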
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001872static int evergreen_cp_start(struct radeon_device *rdev)
1873{
Christian Könige32eb502011-10-23 12:56:27 +02001874 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucher2281a372010-10-21 13:31:38 -04001875 int r, i;
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001876 uint32_t cp_me;
1877
Christian Könige32eb502011-10-23 12:56:27 +02001878 r = radeon_ring_lock(rdev, ring, 7);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001879 if (r) {
1880 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1881 return r;
1882 }
Christian Könige32eb502011-10-23 12:56:27 +02001883 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
1884 radeon_ring_write(ring, 0x1);
1885 radeon_ring_write(ring, 0x0);
1886 radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
1887 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1888 radeon_ring_write(ring, 0);
1889 radeon_ring_write(ring, 0);
1890 radeon_ring_unlock_commit(rdev, ring);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001891
1892 cp_me = 0xff;
1893 WREG32(CP_ME_CNTL, cp_me);
1894
Christian Könige32eb502011-10-23 12:56:27 +02001895 r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001896 if (r) {
1897 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1898 return r;
1899 }
Alex Deucher2281a372010-10-21 13:31:38 -04001900
1901 /* setup clear context state */
Christian Könige32eb502011-10-23 12:56:27 +02001902 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1903 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
Alex Deucher2281a372010-10-21 13:31:38 -04001904
1905 for (i = 0; i < evergreen_default_size; i++)
Christian Könige32eb502011-10-23 12:56:27 +02001906 radeon_ring_write(ring, evergreen_default_state[i]);
Alex Deucher2281a372010-10-21 13:31:38 -04001907
Christian Könige32eb502011-10-23 12:56:27 +02001908 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1909 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
Alex Deucher2281a372010-10-21 13:31:38 -04001910
1911 /* set clear context state */
Christian Könige32eb502011-10-23 12:56:27 +02001912 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1913 radeon_ring_write(ring, 0);
Alex Deucher2281a372010-10-21 13:31:38 -04001914
1915 /* SQ_VTX_BASE_VTX_LOC */
Christian Könige32eb502011-10-23 12:56:27 +02001916 radeon_ring_write(ring, 0xc0026f00);
1917 radeon_ring_write(ring, 0x00000000);
1918 radeon_ring_write(ring, 0x00000000);
1919 radeon_ring_write(ring, 0x00000000);
Alex Deucher2281a372010-10-21 13:31:38 -04001920
1921 /* Clear consts */
Christian Könige32eb502011-10-23 12:56:27 +02001922 radeon_ring_write(ring, 0xc0036f00);
1923 radeon_ring_write(ring, 0x00000bc4);
1924 radeon_ring_write(ring, 0xffffffff);
1925 radeon_ring_write(ring, 0xffffffff);
1926 radeon_ring_write(ring, 0xffffffff);
Alex Deucher2281a372010-10-21 13:31:38 -04001927
Christian Könige32eb502011-10-23 12:56:27 +02001928 radeon_ring_write(ring, 0xc0026900);
1929 radeon_ring_write(ring, 0x00000316);
1930 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1931 radeon_ring_write(ring, 0x00000010); /* */
Alex Deucher18ff84d2011-02-02 12:37:41 -05001932
Christian Könige32eb502011-10-23 12:56:27 +02001933 radeon_ring_unlock_commit(rdev, ring);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001934
1935 return 0;
1936}
1937
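/**
 * evergreen_cp_resume - set up and start the gfx ring
 *
 * @rdev: radeon_device pointer
 *
 * Soft resets the CP, programs the ring buffer size, pointers and
 * writeback addresses, starts the CP via evergreen_cp_start() and then
 * tests the ring. Returns 0 on success, negative error code on failure.
 */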
Lauri Kasanen1109ca02012-08-31 13:43:50 -04001938static int evergreen_cp_resume(struct radeon_device *rdev)
Alex Deucherfe251e22010-03-24 13:36:43 -04001939{
Christian Könige32eb502011-10-23 12:56:27 +02001940 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucherfe251e22010-03-24 13:36:43 -04001941 u32 tmp;
1942 u32 rb_bufsz;
1943 int r;
1944
1945 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
1946 WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
1947 SOFT_RESET_PA |
1948 SOFT_RESET_SH |
1949 SOFT_RESET_VGT |
Jerome Glissea49a50d2011-08-24 20:00:17 +00001950 SOFT_RESET_SPI |
Alex Deucherfe251e22010-03-24 13:36:43 -04001951 SOFT_RESET_SX));
1952 RREG32(GRBM_SOFT_RESET);
1953 mdelay(15);
1954 WREG32(GRBM_SOFT_RESET, 0);
1955 RREG32(GRBM_SOFT_RESET);
1956
1957 /* Set ring buffer size */
Christian Könige32eb502011-10-23 12:56:27 +02001958 rb_bufsz = drm_order(ring->ring_size / 8);
Alex Deucher724c80e2010-08-27 18:25:25 -04001959 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
Alex Deucherfe251e22010-03-24 13:36:43 -04001960#ifdef __BIG_ENDIAN
1961 tmp |= BUF_SWAP_32BIT;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001962#endif
Alex Deucherfe251e22010-03-24 13:36:43 -04001963 WREG32(CP_RB_CNTL, tmp);
Christian König15d33322011-09-15 19:02:22 +02001964 WREG32(CP_SEM_WAIT_TIMER, 0x0);
Alex Deucher11ef3f12012-01-20 14:47:43 -05001965 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
Alex Deucherfe251e22010-03-24 13:36:43 -04001966
1967 /* Set the write pointer delay */
1968 WREG32(CP_RB_WPTR_DELAY, 0);
1969
1970 /* Initialize the ring buffer's read and write pointers */
1971 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1972 WREG32(CP_RB_RPTR_WR, 0);
Christian Könige32eb502011-10-23 12:56:27 +02001973 ring->wptr = 0;
1974 WREG32(CP_RB_WPTR, ring->wptr);
Alex Deucher724c80e2010-08-27 18:25:25 -04001975
Adam Buchbinder48fc7f72012-09-19 21:48:00 -04001976 /* set the wb address whether it's enabled or not */
Alex Deucher0f234f5f2011-02-13 19:06:33 -05001977 WREG32(CP_RB_RPTR_ADDR,
Alex Deucher0f234f5f2011-02-13 19:06:33 -05001978 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
Alex Deucher724c80e2010-08-27 18:25:25 -04001979 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1980 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1981
1982 if (rdev->wb.enabled)
1983 WREG32(SCRATCH_UMSK, 0xff);
1984 else {
1985 tmp |= RB_NO_UPDATE;
1986 WREG32(SCRATCH_UMSK, 0);
1987 }
1988
Alex Deucherfe251e22010-03-24 13:36:43 -04001989 mdelay(1);
1990 WREG32(CP_RB_CNTL, tmp);
1991
Christian Könige32eb502011-10-23 12:56:27 +02001992 WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
Alex Deucherfe251e22010-03-24 13:36:43 -04001993 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
1994
Christian Könige32eb502011-10-23 12:56:27 +02001995 ring->rptr = RREG32(CP_RB_RPTR);
Alex Deucherfe251e22010-03-24 13:36:43 -04001996
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001997 evergreen_cp_start(rdev);
Christian Könige32eb502011-10-23 12:56:27 +02001998 ring->ready = true;
Alex Deucherf7128122012-02-23 17:53:45 -05001999 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
Alex Deucherfe251e22010-03-24 13:36:43 -04002000 if (r) {
Christian Könige32eb502011-10-23 12:56:27 +02002001 ring->ready = false;
Alex Deucherfe251e22010-03-24 13:36:43 -04002002 return r;
2003 }
2004 return 0;
2005}
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002006
2007/*
2008 * Core functions
2009 */
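/**
 * evergreen_gpu_init - configure the 3D engine
 *
 * @rdev: radeon_device pointer
 *
 * Sets up the per-family GPU configuration (pipes, SIMDs, backends,
 * tiling), programs GB_ADDR_CONFIG and the render backend map, and
 * writes the hardware defaults for the SQ, SX, PA and VGT blocks.
 */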
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002010static void evergreen_gpu_init(struct radeon_device *rdev)
2011{
Alex Deucher416a2bd2012-05-31 19:00:25 -04002012 u32 gb_addr_config;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002013 u32 mc_shared_chmap, mc_arb_ramcfg;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002014 u32 sx_debug_1;
2015 u32 smx_dc_ctl0;
2016 u32 sq_config;
2017 u32 sq_lds_resource_mgmt;
2018 u32 sq_gpr_resource_mgmt_1;
2019 u32 sq_gpr_resource_mgmt_2;
2020 u32 sq_gpr_resource_mgmt_3;
2021 u32 sq_thread_resource_mgmt;
2022 u32 sq_thread_resource_mgmt_2;
2023 u32 sq_stack_resource_mgmt_1;
2024 u32 sq_stack_resource_mgmt_2;
2025 u32 sq_stack_resource_mgmt_3;
2026 u32 vgt_cache_invalidation;
Alex Deucherf25a5c62011-05-19 11:07:57 -04002027 u32 hdp_host_path_cntl, tmp;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002028 u32 disabled_rb_mask;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002029 int i, j, num_shader_engines, ps_thread_count;
2030
2031 switch (rdev->family) {
2032 case CHIP_CYPRESS:
2033 case CHIP_HEMLOCK:
2034 rdev->config.evergreen.num_ses = 2;
2035 rdev->config.evergreen.max_pipes = 4;
2036 rdev->config.evergreen.max_tile_pipes = 8;
2037 rdev->config.evergreen.max_simds = 10;
2038 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
2039 rdev->config.evergreen.max_gprs = 256;
2040 rdev->config.evergreen.max_threads = 248;
2041 rdev->config.evergreen.max_gs_threads = 32;
2042 rdev->config.evergreen.max_stack_entries = 512;
2043 rdev->config.evergreen.sx_num_of_sets = 4;
2044 rdev->config.evergreen.sx_max_export_size = 256;
2045 rdev->config.evergreen.sx_max_export_pos_size = 64;
2046 rdev->config.evergreen.sx_max_export_smx_size = 192;
2047 rdev->config.evergreen.max_hw_contexts = 8;
2048 rdev->config.evergreen.sq_num_cf_insts = 2;
2049
2050 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
2051 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2052 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002053 gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002054 break;
2055 case CHIP_JUNIPER:
2056 rdev->config.evergreen.num_ses = 1;
2057 rdev->config.evergreen.max_pipes = 4;
2058 rdev->config.evergreen.max_tile_pipes = 4;
2059 rdev->config.evergreen.max_simds = 10;
2060 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
2061 rdev->config.evergreen.max_gprs = 256;
2062 rdev->config.evergreen.max_threads = 248;
2063 rdev->config.evergreen.max_gs_threads = 32;
2064 rdev->config.evergreen.max_stack_entries = 512;
2065 rdev->config.evergreen.sx_num_of_sets = 4;
2066 rdev->config.evergreen.sx_max_export_size = 256;
2067 rdev->config.evergreen.sx_max_export_pos_size = 64;
2068 rdev->config.evergreen.sx_max_export_smx_size = 192;
2069 rdev->config.evergreen.max_hw_contexts = 8;
2070 rdev->config.evergreen.sq_num_cf_insts = 2;
2071
2072 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
2073 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2074 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002075 gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002076 break;
2077 case CHIP_REDWOOD:
2078 rdev->config.evergreen.num_ses = 1;
2079 rdev->config.evergreen.max_pipes = 4;
2080 rdev->config.evergreen.max_tile_pipes = 4;
2081 rdev->config.evergreen.max_simds = 5;
2082 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
2083 rdev->config.evergreen.max_gprs = 256;
2084 rdev->config.evergreen.max_threads = 248;
2085 rdev->config.evergreen.max_gs_threads = 32;
2086 rdev->config.evergreen.max_stack_entries = 256;
2087 rdev->config.evergreen.sx_num_of_sets = 4;
2088 rdev->config.evergreen.sx_max_export_size = 256;
2089 rdev->config.evergreen.sx_max_export_pos_size = 64;
2090 rdev->config.evergreen.sx_max_export_smx_size = 192;
2091 rdev->config.evergreen.max_hw_contexts = 8;
2092 rdev->config.evergreen.sq_num_cf_insts = 2;
2093
2094 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
2095 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2096 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002097 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002098 break;
2099 case CHIP_CEDAR:
2100 default:
2101 rdev->config.evergreen.num_ses = 1;
2102 rdev->config.evergreen.max_pipes = 2;
2103 rdev->config.evergreen.max_tile_pipes = 2;
2104 rdev->config.evergreen.max_simds = 2;
2105 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
2106 rdev->config.evergreen.max_gprs = 256;
2107 rdev->config.evergreen.max_threads = 192;
2108 rdev->config.evergreen.max_gs_threads = 16;
2109 rdev->config.evergreen.max_stack_entries = 256;
2110 rdev->config.evergreen.sx_num_of_sets = 4;
2111 rdev->config.evergreen.sx_max_export_size = 128;
2112 rdev->config.evergreen.sx_max_export_pos_size = 32;
2113 rdev->config.evergreen.sx_max_export_smx_size = 96;
2114 rdev->config.evergreen.max_hw_contexts = 4;
2115 rdev->config.evergreen.sq_num_cf_insts = 1;
2116
2117 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
2118 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2119 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002120 gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002121 break;
Alex Deucherd5e455e2010-11-22 17:56:29 -05002122 case CHIP_PALM:
2123 rdev->config.evergreen.num_ses = 1;
2124 rdev->config.evergreen.max_pipes = 2;
2125 rdev->config.evergreen.max_tile_pipes = 2;
2126 rdev->config.evergreen.max_simds = 2;
2127 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
2128 rdev->config.evergreen.max_gprs = 256;
2129 rdev->config.evergreen.max_threads = 192;
2130 rdev->config.evergreen.max_gs_threads = 16;
2131 rdev->config.evergreen.max_stack_entries = 256;
2132 rdev->config.evergreen.sx_num_of_sets = 4;
2133 rdev->config.evergreen.sx_max_export_size = 128;
2134 rdev->config.evergreen.sx_max_export_pos_size = 32;
2135 rdev->config.evergreen.sx_max_export_smx_size = 96;
2136 rdev->config.evergreen.max_hw_contexts = 4;
2137 rdev->config.evergreen.sq_num_cf_insts = 1;
2138
2139 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
2140 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2141 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002142 gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
Alex Deucherd5e455e2010-11-22 17:56:29 -05002143 break;
Alex Deucherd5c5a722011-05-31 15:42:48 -04002144 case CHIP_SUMO:
2145 rdev->config.evergreen.num_ses = 1;
2146 rdev->config.evergreen.max_pipes = 4;
Jerome Glissebd25f072012-12-11 11:56:52 -05002147 rdev->config.evergreen.max_tile_pipes = 4;
Alex Deucherd5c5a722011-05-31 15:42:48 -04002148 if (rdev->pdev->device == 0x9648)
2149 rdev->config.evergreen.max_simds = 3;
2150 else if ((rdev->pdev->device == 0x9647) ||
2151 (rdev->pdev->device == 0x964a))
2152 rdev->config.evergreen.max_simds = 4;
2153 else
2154 rdev->config.evergreen.max_simds = 5;
2155 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
2156 rdev->config.evergreen.max_gprs = 256;
2157 rdev->config.evergreen.max_threads = 248;
2158 rdev->config.evergreen.max_gs_threads = 32;
2159 rdev->config.evergreen.max_stack_entries = 256;
2160 rdev->config.evergreen.sx_num_of_sets = 4;
2161 rdev->config.evergreen.sx_max_export_size = 256;
2162 rdev->config.evergreen.sx_max_export_pos_size = 64;
2163 rdev->config.evergreen.sx_max_export_smx_size = 192;
2164 rdev->config.evergreen.max_hw_contexts = 8;
2165 rdev->config.evergreen.sq_num_cf_insts = 2;
2166
2167 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
2168 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2169 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Jerome Glissebd25f072012-12-11 11:56:52 -05002170 gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
Alex Deucherd5c5a722011-05-31 15:42:48 -04002171 break;
2172 case CHIP_SUMO2:
2173 rdev->config.evergreen.num_ses = 1;
2174 rdev->config.evergreen.max_pipes = 4;
2175 rdev->config.evergreen.max_tile_pipes = 4;
2176 rdev->config.evergreen.max_simds = 2;
2177 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
2178 rdev->config.evergreen.max_gprs = 256;
2179 rdev->config.evergreen.max_threads = 248;
2180 rdev->config.evergreen.max_gs_threads = 32;
2181 rdev->config.evergreen.max_stack_entries = 512;
2182 rdev->config.evergreen.sx_num_of_sets = 4;
2183 rdev->config.evergreen.sx_max_export_size = 256;
2184 rdev->config.evergreen.sx_max_export_pos_size = 64;
2185 rdev->config.evergreen.sx_max_export_smx_size = 192;
2186 rdev->config.evergreen.max_hw_contexts = 8;
2187 rdev->config.evergreen.sq_num_cf_insts = 2;
2188
2189 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
2190 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2191 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Jerome Glissebd25f072012-12-11 11:56:52 -05002192 gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
Alex Deucherd5c5a722011-05-31 15:42:48 -04002193 break;
Alex Deucheradb68fa2011-01-06 21:19:24 -05002194 case CHIP_BARTS:
2195 rdev->config.evergreen.num_ses = 2;
2196 rdev->config.evergreen.max_pipes = 4;
2197 rdev->config.evergreen.max_tile_pipes = 8;
2198 rdev->config.evergreen.max_simds = 7;
2199 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
2200 rdev->config.evergreen.max_gprs = 256;
2201 rdev->config.evergreen.max_threads = 248;
2202 rdev->config.evergreen.max_gs_threads = 32;
2203 rdev->config.evergreen.max_stack_entries = 512;
2204 rdev->config.evergreen.sx_num_of_sets = 4;
2205 rdev->config.evergreen.sx_max_export_size = 256;
2206 rdev->config.evergreen.sx_max_export_pos_size = 64;
2207 rdev->config.evergreen.sx_max_export_smx_size = 192;
2208 rdev->config.evergreen.max_hw_contexts = 8;
2209 rdev->config.evergreen.sq_num_cf_insts = 2;
2210
2211 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
2212 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2213 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002214 gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
Alex Deucheradb68fa2011-01-06 21:19:24 -05002215 break;
2216 case CHIP_TURKS:
2217 rdev->config.evergreen.num_ses = 1;
2218 rdev->config.evergreen.max_pipes = 4;
2219 rdev->config.evergreen.max_tile_pipes = 4;
2220 rdev->config.evergreen.max_simds = 6;
2221 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
2222 rdev->config.evergreen.max_gprs = 256;
2223 rdev->config.evergreen.max_threads = 248;
2224 rdev->config.evergreen.max_gs_threads = 32;
2225 rdev->config.evergreen.max_stack_entries = 256;
2226 rdev->config.evergreen.sx_num_of_sets = 4;
2227 rdev->config.evergreen.sx_max_export_size = 256;
2228 rdev->config.evergreen.sx_max_export_pos_size = 64;
2229 rdev->config.evergreen.sx_max_export_smx_size = 192;
2230 rdev->config.evergreen.max_hw_contexts = 8;
2231 rdev->config.evergreen.sq_num_cf_insts = 2;
2232
2233 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
2234 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2235 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002236 gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
Alex Deucheradb68fa2011-01-06 21:19:24 -05002237 break;
2238 case CHIP_CAICOS:
2239 rdev->config.evergreen.num_ses = 1;
Jerome Glissebd25f072012-12-11 11:56:52 -05002240 rdev->config.evergreen.max_pipes = 2;
Alex Deucheradb68fa2011-01-06 21:19:24 -05002241 rdev->config.evergreen.max_tile_pipes = 2;
2242 rdev->config.evergreen.max_simds = 2;
2243 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
2244 rdev->config.evergreen.max_gprs = 256;
2245 rdev->config.evergreen.max_threads = 192;
2246 rdev->config.evergreen.max_gs_threads = 16;
2247 rdev->config.evergreen.max_stack_entries = 256;
2248 rdev->config.evergreen.sx_num_of_sets = 4;
2249 rdev->config.evergreen.sx_max_export_size = 128;
2250 rdev->config.evergreen.sx_max_export_pos_size = 32;
2251 rdev->config.evergreen.sx_max_export_smx_size = 96;
2252 rdev->config.evergreen.max_hw_contexts = 4;
2253 rdev->config.evergreen.sq_num_cf_insts = 1;
2254
2255 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
2256 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2257 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002258 gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
Alex Deucheradb68fa2011-01-06 21:19:24 -05002259 break;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002260 }
2261
2262 /* Initialize HDP */
2263 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
2264 WREG32((0x2c14 + j), 0x00000000);
2265 WREG32((0x2c18 + j), 0x00000000);
2266 WREG32((0x2c1c + j), 0x00000000);
2267 WREG32((0x2c20 + j), 0x00000000);
2268 WREG32((0x2c24 + j), 0x00000000);
2269 }
2270
2271 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
2272
Alex Deucherd054ac12011-09-01 17:46:15 +00002273 evergreen_fix_pci_max_read_req_size(rdev);
2274
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002275 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
Alex Deucher05b3ef62012-03-20 17:18:37 -04002276 if ((rdev->family == CHIP_PALM) ||
2277 (rdev->family == CHIP_SUMO) ||
2278 (rdev->family == CHIP_SUMO2))
Alex Deucherd9282fc2011-05-11 03:15:24 -04002279 mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
2280 else
2281 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002282
Alex Deucher1aa52bd2010-11-17 12:11:03 -05002283 /* setup tiling info dword. gb_addr_config is not adequate since it does
2284 * not have bank info, so create a custom tiling dword.
2285 * bits 3:0 num_pipes
2286 * bits 7:4 num_banks
2287 * bits 11:8 group_size
2288 * bits 15:12 row_size
2289 */
2290 rdev->config.evergreen.tile_config = 0;
2291 switch (rdev->config.evergreen.max_tile_pipes) {
2292 case 1:
2293 default:
2294 rdev->config.evergreen.tile_config |= (0 << 0);
2295 break;
2296 case 2:
2297 rdev->config.evergreen.tile_config |= (1 << 0);
2298 break;
2299 case 4:
2300 rdev->config.evergreen.tile_config |= (2 << 0);
2301 break;
2302 case 8:
2303 rdev->config.evergreen.tile_config |= (3 << 0);
2304 break;
2305 }
Alex Deucherd698a342011-06-23 00:49:29 -04002306 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
Alex Deucher5bfa4872011-05-20 12:35:22 -04002307 if (rdev->flags & RADEON_IS_IGP)
Alex Deucherd698a342011-06-23 00:49:29 -04002308 rdev->config.evergreen.tile_config |= 1 << 4;
Alex Deucher29d65402012-05-31 18:53:36 -04002309 else {
Alex Deucherc8d15ed2012-07-31 11:01:10 -04002310 switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
2311 case 0: /* four banks */
Alex Deucher29d65402012-05-31 18:53:36 -04002312 rdev->config.evergreen.tile_config |= 0 << 4;
Alex Deucherc8d15ed2012-07-31 11:01:10 -04002313 break;
2314 case 1: /* eight banks */
2315 rdev->config.evergreen.tile_config |= 1 << 4;
2316 break;
2317 case 2: /* sixteen banks */
2318 default:
2319 rdev->config.evergreen.tile_config |= 2 << 4;
2320 break;
2321 }
Alex Deucher29d65402012-05-31 18:53:36 -04002322 }
Alex Deucher416a2bd2012-05-31 19:00:25 -04002323 rdev->config.evergreen.tile_config |= 0 << 8;
Alex Deucher1aa52bd2010-11-17 12:11:03 -05002324 rdev->config.evergreen.tile_config |=
2325 ((gb_addr_config & 0x30000000) >> 28) << 12;
2326
Alex Deucher416a2bd2012-05-31 19:00:25 -04002327 num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1;
2328
2329 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
2330 u32 efuse_straps_4;
2331 u32 efuse_straps_3;
2332
2333 WREG32(RCU_IND_INDEX, 0x204);
2334 efuse_straps_4 = RREG32(RCU_IND_DATA);
2335 WREG32(RCU_IND_INDEX, 0x203);
2336 efuse_straps_3 = RREG32(RCU_IND_DATA);
2337 tmp = (((efuse_straps_4 & 0xf) << 4) |
2338 ((efuse_straps_3 & 0xf0000000) >> 28));
2339 } else {
2340 tmp = 0;
2341 for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
2342 u32 rb_disable_bitmap;
2343
2344 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
2345 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
2346 rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
2347 tmp <<= 4;
2348 tmp |= rb_disable_bitmap;
2349 }
2350 }
 2351 /* enabled rbs are just the ones not disabled :) */
2352 disabled_rb_mask = tmp;
Alex Deuchercedb6552013-04-09 10:13:22 -04002353 tmp = 0;
2354 for (i = 0; i < rdev->config.evergreen.max_backends; i++)
2355 tmp |= (1 << i);
2356 /* if all the backends are disabled, fix it up here */
2357 if ((disabled_rb_mask & tmp) == tmp) {
2358 for (i = 0; i < rdev->config.evergreen.max_backends; i++)
2359 disabled_rb_mask &= ~(1 << i);
2360 }
Alex Deucher416a2bd2012-05-31 19:00:25 -04002361
2362 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
2363 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
2364
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002365 WREG32(GB_ADDR_CONFIG, gb_addr_config);
2366 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2367 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
Alex Deucher233d1ad2012-12-04 15:25:59 -05002368 WREG32(DMA_TILING_CONFIG, gb_addr_config);
Christian König9a210592013-04-08 12:41:37 +02002369 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
2370 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
2371 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002372
Alex Deucherf7eb9732013-01-30 13:57:40 -05002373 if ((rdev->config.evergreen.max_backends == 1) &&
2374 (rdev->flags & RADEON_IS_IGP)) {
2375 if ((disabled_rb_mask & 3) == 1) {
2376 /* RB0 disabled, RB1 enabled */
2377 tmp = 0x11111111;
2378 } else {
2379 /* RB1 disabled, RB0 enabled */
2380 tmp = 0x00000000;
2381 }
2382 } else {
2383 tmp = gb_addr_config & NUM_PIPES_MASK;
2384 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
2385 EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
2386 }
Alex Deucher416a2bd2012-05-31 19:00:25 -04002387 WREG32(GB_BACKEND_MAP, tmp);
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002388
2389 WREG32(CGTS_SYS_TCC_DISABLE, 0);
2390 WREG32(CGTS_TCC_DISABLE, 0);
2391 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
2392 WREG32(CGTS_USER_TCC_DISABLE, 0);
2393
2394 /* set HW defaults for 3D engine */
2395 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
2396 ROQ_IB2_START(0x2b)));
2397
2398 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
2399
2400 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
2401 SYNC_GRADIENT |
2402 SYNC_WALKER |
2403 SYNC_ALIGNER));
2404
2405 sx_debug_1 = RREG32(SX_DEBUG_1);
2406 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
2407 WREG32(SX_DEBUG_1, sx_debug_1);
2408
2409
2410 smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
2411 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
2412 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
2413 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
2414
Alex Deucherb866d132012-06-14 22:06:36 +02002415 if (rdev->family <= CHIP_SUMO2)
2416 WREG32(SMX_SAR_CTL0, 0x00010000);
2417
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002418 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
2419 POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
2420 SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
2421
2422 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
2423 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
2424 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
2425
2426 WREG32(VGT_NUM_INSTANCES, 1);
2427 WREG32(SPI_CONFIG_CNTL, 0);
2428 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
2429 WREG32(CP_PERFMON_CNTL, 0);
2430
2431 WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
2432 FETCH_FIFO_HIWATER(0x4) |
2433 DONE_FIFO_HIWATER(0xe0) |
2434 ALU_UPDATE_FIFO_HIWATER(0x8)));
2435
2436 sq_config = RREG32(SQ_CONFIG);
2437 sq_config &= ~(PS_PRIO(3) |
2438 VS_PRIO(3) |
2439 GS_PRIO(3) |
2440 ES_PRIO(3));
2441 sq_config |= (VC_ENABLE |
2442 EXPORT_SRC_C |
2443 PS_PRIO(0) |
2444 VS_PRIO(1) |
2445 GS_PRIO(2) |
2446 ES_PRIO(3));
2447
Alex Deucherd5e455e2010-11-22 17:56:29 -05002448 switch (rdev->family) {
2449 case CHIP_CEDAR:
2450 case CHIP_PALM:
Alex Deucherd5c5a722011-05-31 15:42:48 -04002451 case CHIP_SUMO:
2452 case CHIP_SUMO2:
Alex Deucheradb68fa2011-01-06 21:19:24 -05002453 case CHIP_CAICOS:
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002454 /* no vertex cache */
2455 sq_config &= ~VC_ENABLE;
Alex Deucherd5e455e2010-11-22 17:56:29 -05002456 break;
2457 default:
2458 break;
2459 }
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002460
2461 sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
2462
2463 sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
2464 sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
2465 sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
2466 sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2467 sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2468 sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
2469 sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
2470
Alex Deucherd5e455e2010-11-22 17:56:29 -05002471 switch (rdev->family) {
2472 case CHIP_CEDAR:
2473 case CHIP_PALM:
Alex Deucherd5c5a722011-05-31 15:42:48 -04002474 case CHIP_SUMO:
2475 case CHIP_SUMO2:
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002476 ps_thread_count = 96;
Alex Deucherd5e455e2010-11-22 17:56:29 -05002477 break;
2478 default:
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002479 ps_thread_count = 128;
Alex Deucherd5e455e2010-11-22 17:56:29 -05002480 break;
2481 }
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002482
2483 sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
Alex Deucherf96b35c2010-06-16 12:24:07 -04002484 sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2485 sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2486 sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2487 sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2488 sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002489
2490 sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2491 sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2492 sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2493 sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2494 sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2495 sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2496
2497 WREG32(SQ_CONFIG, sq_config);
2498 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
2499 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
2500 WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
2501 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2502 WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
2503 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2504 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2505 WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
2506 WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
2507 WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
2508
2509 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
2510 FORCE_EOV_MAX_REZ_CNT(255)));
2511
Alex Deucherd5e455e2010-11-22 17:56:29 -05002512 switch (rdev->family) {
2513 case CHIP_CEDAR:
2514 case CHIP_PALM:
Alex Deucherd5c5a722011-05-31 15:42:48 -04002515 case CHIP_SUMO:
2516 case CHIP_SUMO2:
Alex Deucheradb68fa2011-01-06 21:19:24 -05002517 case CHIP_CAICOS:
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002518 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
Alex Deucherd5e455e2010-11-22 17:56:29 -05002519 break;
2520 default:
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002521 vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
Alex Deucherd5e455e2010-11-22 17:56:29 -05002522 break;
2523 }
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002524 vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
2525 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
2526
2527 WREG32(VGT_GS_VERTEX_REUSE, 16);
Alex Deucher12920592011-02-02 12:37:40 -05002528 WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002529 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2530
Alex Deucher60a4a3e2010-06-29 17:03:35 -04002531 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
2532 WREG32(VGT_OUT_DEALLOC_CNTL, 16);
2533
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002534 WREG32(CB_PERF_CTR0_SEL_0, 0);
2535 WREG32(CB_PERF_CTR0_SEL_1, 0);
2536 WREG32(CB_PERF_CTR1_SEL_0, 0);
2537 WREG32(CB_PERF_CTR1_SEL_1, 0);
2538 WREG32(CB_PERF_CTR2_SEL_0, 0);
2539 WREG32(CB_PERF_CTR2_SEL_1, 0);
2540 WREG32(CB_PERF_CTR3_SEL_0, 0);
2541 WREG32(CB_PERF_CTR3_SEL_1, 0);
2542
Alex Deucher60a4a3e2010-06-29 17:03:35 -04002543 /* clear render buffer base addresses */
2544 WREG32(CB_COLOR0_BASE, 0);
2545 WREG32(CB_COLOR1_BASE, 0);
2546 WREG32(CB_COLOR2_BASE, 0);
2547 WREG32(CB_COLOR3_BASE, 0);
2548 WREG32(CB_COLOR4_BASE, 0);
2549 WREG32(CB_COLOR5_BASE, 0);
2550 WREG32(CB_COLOR6_BASE, 0);
2551 WREG32(CB_COLOR7_BASE, 0);
2552 WREG32(CB_COLOR8_BASE, 0);
2553 WREG32(CB_COLOR9_BASE, 0);
2554 WREG32(CB_COLOR10_BASE, 0);
2555 WREG32(CB_COLOR11_BASE, 0);
2556
2557 /* set the shader const cache sizes to 0 */
2558 for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
2559 WREG32(i, 0);
2560 for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
2561 WREG32(i, 0);
2562
Alex Deucherf25a5c62011-05-19 11:07:57 -04002563 tmp = RREG32(HDP_MISC_CNTL);
2564 tmp |= HDP_FLUSH_INVALIDATE_CACHE;
2565 WREG32(HDP_MISC_CNTL, tmp);
2566
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002567 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
2568 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
2569
2570 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
2571
2572 udelay(50);
2573
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002574}
2575
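/**
 * evergreen_mc_init - initialize the memory controller driver state
 *
 * @rdev: radeon_device pointer
 *
 * Determines the VRAM type, width and size (read differently on fusion
 * parts than on discrete evergreen), sets up the GPU address space and
 * updates the bandwidth info. Always returns 0.
 */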
2576int evergreen_mc_init(struct radeon_device *rdev)
2577{
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002578 u32 tmp;
2579 int chansize, numchan;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002580
2581 /* Get VRAM informations */
2582 rdev->mc.vram_is_ddr = true;
Alex Deucher05b3ef62012-03-20 17:18:37 -04002583 if ((rdev->family == CHIP_PALM) ||
2584 (rdev->family == CHIP_SUMO) ||
2585 (rdev->family == CHIP_SUMO2))
Alex Deucher82084412011-07-01 13:18:28 -04002586 tmp = RREG32(FUS_MC_ARB_RAMCFG);
2587 else
2588 tmp = RREG32(MC_ARB_RAMCFG);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002589 if (tmp & CHANSIZE_OVERRIDE) {
2590 chansize = 16;
2591 } else if (tmp & CHANSIZE_MASK) {
2592 chansize = 64;
2593 } else {
2594 chansize = 32;
2595 }
2596 tmp = RREG32(MC_SHARED_CHMAP);
2597 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
2598 case 0:
2599 default:
2600 numchan = 1;
2601 break;
2602 case 1:
2603 numchan = 2;
2604 break;
2605 case 2:
2606 numchan = 4;
2607 break;
2608 case 3:
2609 numchan = 8;
2610 break;
2611 }
2612 rdev->mc.vram_width = numchan * chansize;
2613 /* Could aper size report 0 ? */
Jordan Crouse01d73a62010-05-27 13:40:24 -06002614 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2615 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002616 /* Setup GPU memory space */
Alex Deucher05b3ef62012-03-20 17:18:37 -04002617 if ((rdev->family == CHIP_PALM) ||
2618 (rdev->family == CHIP_SUMO) ||
2619 (rdev->family == CHIP_SUMO2)) {
Alex Deucher6eb18f82010-11-22 17:56:27 -05002620 /* size in bytes on fusion */
2621 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
2622 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
2623 } else {
Alex Deucher05b3ef62012-03-20 17:18:37 -04002624 /* size in MB on evergreen/cayman/tn */
Alex Deucher6eb18f82010-11-22 17:56:27 -05002625 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2626 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2627 }
Jerome Glisse51e5fcd2010-02-19 14:33:54 +00002628 rdev->mc.visible_vram_size = rdev->mc.aper_size;
Alex Deucher0ef0c1f2010-11-22 17:56:26 -05002629 r700_vram_gtt_location(rdev, &rdev->mc);
Alex Deucherf47299c2010-03-16 20:54:38 -04002630 radeon_update_bandwidth_info(rdev);
2631
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002632 return 0;
2633}
Jerome Glissed594e462010-02-17 21:54:29 +00002634
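/**
 * evergreen_print_gpu_status_regs - print GPU status registers
 *
 * @rdev: radeon_device pointer
 *
 * Dumps the GRBM, SRBM, CP and DMA status registers to the kernel log to
 * help diagnose GPU lockups.
 */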
Alex Deucher187e3592013-01-18 14:51:38 -05002635void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
Alex Deucher747943e2010-03-24 13:26:36 -04002636{
Jerome Glisse64c56e82013-01-02 17:30:35 -05002637 dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
Alex Deucher747943e2010-03-24 13:26:36 -04002638 RREG32(GRBM_STATUS));
Jerome Glisse64c56e82013-01-02 17:30:35 -05002639 dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
Alex Deucher747943e2010-03-24 13:26:36 -04002640 RREG32(GRBM_STATUS_SE0));
Jerome Glisse64c56e82013-01-02 17:30:35 -05002641 dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
Alex Deucher747943e2010-03-24 13:26:36 -04002642 RREG32(GRBM_STATUS_SE1));
Jerome Glisse64c56e82013-01-02 17:30:35 -05002643 dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
Alex Deucher747943e2010-03-24 13:26:36 -04002644 RREG32(SRBM_STATUS));
Alex Deuchera65a4362013-01-18 18:55:54 -05002645 dev_info(rdev->dev, " SRBM_STATUS2 = 0x%08X\n",
2646 RREG32(SRBM_STATUS2));
Jerome Glisse440a7cd2012-06-27 12:25:01 -04002647 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
2648 RREG32(CP_STALLED_STAT1));
2649 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
2650 RREG32(CP_STALLED_STAT2));
2651 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
2652 RREG32(CP_BUSY_STAT));
2653 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
2654 RREG32(CP_STAT));
Alex Deucher0ecebb92013-01-03 12:40:13 -05002655 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
2656 RREG32(DMA_STATUS_REG));
Alex Deucher168757e2013-01-18 19:17:22 -05002657 if (rdev->family >= CHIP_CAYMAN) {
2658 dev_info(rdev->dev, " R_00D834_DMA_STATUS_REG = 0x%08X\n",
2659 RREG32(DMA_STATUS_REG + 0x800));
2660 }
Alex Deucher0ecebb92013-01-03 12:40:13 -05002661}
2662
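/**
 * evergreen_is_display_hung - check whether the display engine is hung
 *
 * @rdev: radeon_device pointer
 *
 * Samples the HV counter of every enabled CRTC ten times; if any counter
 * never advances, the display is considered hung. Returns true if hung,
 * false otherwise.
 */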
Alex Deucher168757e2013-01-18 19:17:22 -05002663bool evergreen_is_display_hung(struct radeon_device *rdev)
Alex Deuchera65a4362013-01-18 18:55:54 -05002664{
2665 u32 crtc_hung = 0;
2666 u32 crtc_status[6];
2667 u32 i, j, tmp;
2668
2669 for (i = 0; i < rdev->num_crtc; i++) {
2670 if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
2671 crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
2672 crtc_hung |= (1 << i);
2673 }
2674 }
2675
2676 for (j = 0; j < 10; j++) {
2677 for (i = 0; i < rdev->num_crtc; i++) {
2678 if (crtc_hung & (1 << i)) {
2679 tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
2680 if (tmp != crtc_status[i])
2681 crtc_hung &= ~(1 << i);
2682 }
2683 }
2684 if (crtc_hung == 0)
2685 return false;
2686 udelay(100);
2687 }
2688
2689 return true;
2690}
2691
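/**
 * evergreen_gpu_check_soft_reset - determine which blocks need a soft reset
 *
 * @rdev: radeon_device pointer
 *
 * Inspects GRBM_STATUS, SRBM_STATUS, SRBM_STATUS2, DMA_STATUS_REG and
 * VM_L2_STATUS and builds a RADEON_RESET_* mask of the blocks that appear
 * busy or hung. A busy MC is deliberately dropped from the mask.
 * Returns the reset mask.
 */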
2692static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
2693{
2694 u32 reset_mask = 0;
2695 u32 tmp;
2696
2697 /* GRBM_STATUS */
2698 tmp = RREG32(GRBM_STATUS);
2699 if (tmp & (PA_BUSY | SC_BUSY |
2700 SH_BUSY | SX_BUSY |
2701 TA_BUSY | VGT_BUSY |
2702 DB_BUSY | CB_BUSY |
2703 SPI_BUSY | VGT_BUSY_NO_DMA))
2704 reset_mask |= RADEON_RESET_GFX;
2705
2706 if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
2707 CP_BUSY | CP_COHERENCY_BUSY))
2708 reset_mask |= RADEON_RESET_CP;
2709
2710 if (tmp & GRBM_EE_BUSY)
2711 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
2712
2713 /* DMA_STATUS_REG */
2714 tmp = RREG32(DMA_STATUS_REG);
2715 if (!(tmp & DMA_IDLE))
2716 reset_mask |= RADEON_RESET_DMA;
2717
2718 /* SRBM_STATUS2 */
2719 tmp = RREG32(SRBM_STATUS2);
2720 if (tmp & DMA_BUSY)
2721 reset_mask |= RADEON_RESET_DMA;
2722
2723 /* SRBM_STATUS */
2724 tmp = RREG32(SRBM_STATUS);
2725 if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
2726 reset_mask |= RADEON_RESET_RLC;
2727
2728 if (tmp & IH_BUSY)
2729 reset_mask |= RADEON_RESET_IH;
2730
2731 if (tmp & SEM_BUSY)
2732 reset_mask |= RADEON_RESET_SEM;
2733
2734 if (tmp & GRBM_RQ_PENDING)
2735 reset_mask |= RADEON_RESET_GRBM;
2736
2737 if (tmp & VMC_BUSY)
2738 reset_mask |= RADEON_RESET_VMC;
2739
2740 if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
2741 MCC_BUSY | MCD_BUSY))
2742 reset_mask |= RADEON_RESET_MC;
2743
2744 if (evergreen_is_display_hung(rdev))
2745 reset_mask |= RADEON_RESET_DISPLAY;
2746
2747 /* VM_L2_STATUS */
2748 tmp = RREG32(VM_L2_STATUS);
2749 if (tmp & L2_BUSY)
2750 reset_mask |= RADEON_RESET_VMC;
2751
Alex Deucherd808fc82013-02-28 10:03:08 -05002752 /* Skip MC reset as it's most likely not hung, just busy */
2753 if (reset_mask & RADEON_RESET_MC) {
2754 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
2755 reset_mask &= ~RADEON_RESET_MC;
2756 }
2757
Alex Deuchera65a4362013-01-18 18:55:54 -05002758 return reset_mask;
2759}
2760
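/**
 * evergreen_gpu_soft_reset - soft reset the requested GPU blocks
 *
 * @rdev: radeon_device pointer
 * @reset_mask: mask of RADEON_RESET_* flags for the blocks to reset
 *
 * Halt the CP and DMA engines, stop the MC, pulse the GRBM and SRBM
 * soft reset bits corresponding to @reset_mask, then restore the MC
 * (evergreen and newer).
 */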
2761static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
Alex Deucher0ecebb92013-01-03 12:40:13 -05002762{
2763 struct evergreen_mc_save save;
Alex Deucherb7630472013-01-18 14:28:41 -05002764 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
2765 u32 tmp;
Alex Deucher19fc42e2013-01-14 11:04:39 -05002766
Alex Deucher0ecebb92013-01-03 12:40:13 -05002767 if (reset_mask == 0)
Alex Deuchera65a4362013-01-18 18:55:54 -05002768 return;
Alex Deucher0ecebb92013-01-03 12:40:13 -05002769
2770 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
2771
Alex Deucherb7630472013-01-18 14:28:41 -05002772 evergreen_print_gpu_status_regs(rdev);
2773
Alex Deucherb7630472013-01-18 14:28:41 -05002774 /* Disable CP parsing/prefetching */
2775 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
2776
2777 if (reset_mask & RADEON_RESET_DMA) {
2778 /* Disable DMA */
2779 tmp = RREG32(DMA_RB_CNTL);
2780 tmp &= ~DMA_RB_ENABLE;
2781 WREG32(DMA_RB_CNTL, tmp);
2782 }
2783
Alex Deucherb21b6e72013-01-23 18:57:56 -05002784 udelay(50);
2785
2786 evergreen_mc_stop(rdev, &save);
2787 if (evergreen_mc_wait_for_idle(rdev)) {
 2788 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
2789 }
2790
Alex Deucherb7630472013-01-18 14:28:41 -05002791 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
2792 grbm_soft_reset |= SOFT_RESET_DB |
2793 SOFT_RESET_CB |
2794 SOFT_RESET_PA |
2795 SOFT_RESET_SC |
2796 SOFT_RESET_SPI |
2797 SOFT_RESET_SX |
2798 SOFT_RESET_SH |
2799 SOFT_RESET_TC |
2800 SOFT_RESET_TA |
2801 SOFT_RESET_VC |
2802 SOFT_RESET_VGT;
2803 }
2804
2805 if (reset_mask & RADEON_RESET_CP) {
2806 grbm_soft_reset |= SOFT_RESET_CP |
2807 SOFT_RESET_VGT;
2808
2809 srbm_soft_reset |= SOFT_RESET_GRBM;
2810 }
Alex Deucher0ecebb92013-01-03 12:40:13 -05002811
2812 if (reset_mask & RADEON_RESET_DMA)
Alex Deucherb7630472013-01-18 14:28:41 -05002813 srbm_soft_reset |= SOFT_RESET_DMA;
2814
Alex Deuchera65a4362013-01-18 18:55:54 -05002815 if (reset_mask & RADEON_RESET_DISPLAY)
2816 srbm_soft_reset |= SOFT_RESET_DC;
2817
2818 if (reset_mask & RADEON_RESET_RLC)
2819 srbm_soft_reset |= SOFT_RESET_RLC;
2820
2821 if (reset_mask & RADEON_RESET_SEM)
2822 srbm_soft_reset |= SOFT_RESET_SEM;
2823
2824 if (reset_mask & RADEON_RESET_IH)
2825 srbm_soft_reset |= SOFT_RESET_IH;
2826
2827 if (reset_mask & RADEON_RESET_GRBM)
2828 srbm_soft_reset |= SOFT_RESET_GRBM;
2829
2830 if (reset_mask & RADEON_RESET_VMC)
2831 srbm_soft_reset |= SOFT_RESET_VMC;
2832
Alex Deucher24178ec2013-01-24 15:00:17 -05002833 if (!(rdev->flags & RADEON_IS_IGP)) {
2834 if (reset_mask & RADEON_RESET_MC)
2835 srbm_soft_reset |= SOFT_RESET_MC;
2836 }
Alex Deuchera65a4362013-01-18 18:55:54 -05002837
Alex Deucherb7630472013-01-18 14:28:41 -05002838 if (grbm_soft_reset) {
2839 tmp = RREG32(GRBM_SOFT_RESET);
2840 tmp |= grbm_soft_reset;
2841 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2842 WREG32(GRBM_SOFT_RESET, tmp);
2843 tmp = RREG32(GRBM_SOFT_RESET);
2844
2845 udelay(50);
2846
2847 tmp &= ~grbm_soft_reset;
2848 WREG32(GRBM_SOFT_RESET, tmp);
2849 tmp = RREG32(GRBM_SOFT_RESET);
2850 }
2851
2852 if (srbm_soft_reset) {
2853 tmp = RREG32(SRBM_SOFT_RESET);
2854 tmp |= srbm_soft_reset;
2855 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2856 WREG32(SRBM_SOFT_RESET, tmp);
2857 tmp = RREG32(SRBM_SOFT_RESET);
2858
2859 udelay(50);
2860
2861 tmp &= ~srbm_soft_reset;
2862 WREG32(SRBM_SOFT_RESET, tmp);
2863 tmp = RREG32(SRBM_SOFT_RESET);
2864 }
Alex Deucher0ecebb92013-01-03 12:40:13 -05002865
2866 /* Wait a little for things to settle down */
2867 udelay(50);
2868
Alex Deucher747943e2010-03-24 13:26:36 -04002869 evergreen_mc_resume(rdev, &save);
Alex Deucherb7630472013-01-18 14:28:41 -05002870 udelay(50);
Alex Deucher410a3412013-01-18 13:05:39 -05002871
Alex Deucherb7630472013-01-18 14:28:41 -05002872 evergreen_print_gpu_status_regs(rdev);
Alex Deucher747943e2010-03-24 13:26:36 -04002873}
2874
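/**
 * evergreen_asic_reset - soft reset the GPU
 *
 * @rdev: radeon_device pointer
 *
 * Check which blocks are hung, soft reset them, and update the BIOS
 * scratch engine-hung flag based on whether the reset succeeded.
 * Always returns 0.
 */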
Jerome Glissea2d07b72010-03-09 14:45:11 +00002875int evergreen_asic_reset(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002876{
Alex Deuchera65a4362013-01-18 18:55:54 -05002877 u32 reset_mask;
2878
2879 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2880
2881 if (reset_mask)
2882 r600_set_bios_scratch_engine_hung(rdev, true);
2883
2884 evergreen_gpu_soft_reset(rdev, reset_mask);
2885
2886 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2887
2888 if (!reset_mask)
2889 r600_set_bios_scratch_engine_hung(rdev, false);
2890
2891 return 0;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002892}
2893
Alex Deucher123bc182013-01-24 11:37:19 -05002894/**
2895 * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
2896 *
2897 * @rdev: radeon_device pointer
2898 * @ring: radeon_ring structure holding ring information
2899 *
2900 * Check if the GFX engine is locked up.
2901 * Returns true if the engine appears to be locked up, false if not.
2902 */
2903bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2904{
2905 u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2906
2907 if (!(reset_mask & (RADEON_RESET_GFX |
2908 RADEON_RESET_COMPUTE |
2909 RADEON_RESET_CP))) {
2910 radeon_ring_lockup_update(ring);
2911 return false;
2912 }
2913 /* force CP activities */
2914 radeon_ring_force_activity(rdev, ring);
2915 return radeon_ring_test_lockup(rdev, ring);
2916}
2917
2918/**
2919 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
2920 *
2921 * @rdev: radeon_device pointer
2922 * @ring: radeon_ring structure holding ring information
2923 *
2924 * Check if the async DMA engine is locked up.
2925 * Returns true if the engine appears to be locked up, false if not.
2926 */
2927bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2928{
2929 u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2930
2931 if (!(reset_mask & RADEON_RESET_DMA)) {
2932 radeon_ring_lockup_update(ring);
2933 return false;
2934 }
2935 /* force ring activities */
2936 radeon_ring_force_activity(rdev, ring);
2937 return radeon_ring_test_lockup(rdev, ring);
2938}
2939
Alex Deucher45f9a392010-03-24 13:55:51 -04002940/* Interrupts */
2941
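/**
 * evergreen_get_vblank_counter - get the frame count of a crtc
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to read the frame count from
 *
 * Returns the current frame count of @crtc, or 0 if @crtc is out of
 * range.
 */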
2942u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
2943{
Alex Deucher46437052012-08-15 17:10:32 -04002944 if (crtc >= rdev->num_crtc)
Alex Deucher45f9a392010-03-24 13:55:51 -04002945 return 0;
Alex Deucher46437052012-08-15 17:10:32 -04002946 else
2947 return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
Alex Deucher45f9a392010-03-24 13:55:51 -04002948}
2949
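/**
 * evergreen_disable_interrupt_state - force interrupt sources off
 *
 * @rdev: radeon_device pointer
 *
 * Clear the CP, DMA, CRTC, pageflip, DAC and HPD interrupt enable
 * bits so the hardware state matches "all interrupts disabled".
 */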
2950void evergreen_disable_interrupt_state(struct radeon_device *rdev)
2951{
2952 u32 tmp;
2953
Alex Deucher1b370782011-11-17 20:13:28 -05002954 if (rdev->family >= CHIP_CAYMAN) {
2955 cayman_cp_int_cntl_setup(rdev, 0,
2956 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2957 cayman_cp_int_cntl_setup(rdev, 1, 0);
2958 cayman_cp_int_cntl_setup(rdev, 2, 0);
Alex Deucherf60cbd12012-12-04 15:27:33 -05002959 tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
2960 WREG32(CAYMAN_DMA1_CNTL, tmp);
Alex Deucher1b370782011-11-17 20:13:28 -05002961 } else
2962 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
Alex Deucher233d1ad2012-12-04 15:25:59 -05002963 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
2964 WREG32(DMA_CNTL, tmp);
Alex Deucher45f9a392010-03-24 13:55:51 -04002965 WREG32(GRBM_INT_CNTL, 0);
2966 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2967 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002968 if (rdev->num_crtc >= 4) {
Alex Deucher18007402010-11-22 17:56:28 -05002969 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2970 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002971 }
2972 if (rdev->num_crtc >= 6) {
Alex Deucher18007402010-11-22 17:56:28 -05002973 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2974 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2975 }
Alex Deucher45f9a392010-03-24 13:55:51 -04002976
2977 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2978 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002979 if (rdev->num_crtc >= 4) {
Alex Deucher18007402010-11-22 17:56:28 -05002980 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2981 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002982 }
2983 if (rdev->num_crtc >= 6) {
Alex Deucher18007402010-11-22 17:56:28 -05002984 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2985 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2986 }
Alex Deucher45f9a392010-03-24 13:55:51 -04002987
Alex Deucher05b3ef62012-03-20 17:18:37 -04002988 /* only one DAC on DCE6 */
2989 if (!ASIC_IS_DCE6(rdev))
2990 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
Alex Deucher45f9a392010-03-24 13:55:51 -04002991 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2992
2993 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2994 WREG32(DC_HPD1_INT_CONTROL, tmp);
2995 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2996 WREG32(DC_HPD2_INT_CONTROL, tmp);
2997 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2998 WREG32(DC_HPD3_INT_CONTROL, tmp);
2999 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3000 WREG32(DC_HPD4_INT_CONTROL, tmp);
3001 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3002 WREG32(DC_HPD5_INT_CONTROL, tmp);
3003 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3004 WREG32(DC_HPD6_INT_CONTROL, tmp);
 3006}
3007
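/**
 * evergreen_irq_set - program the interrupt enable registers
 *
 * @rdev: radeon_device pointer
 *
 * Build the CP, DMA, vblank, HPD and HDMI interrupt enable values
 * from the state tracked in rdev->irq and write them to the
 * hardware.
 * Returns 0 on success, -EINVAL if no interrupt handler is
 * installed.
 */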
3008int evergreen_irq_set(struct radeon_device *rdev)
3009{
3010 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
Alex Deucher1b370782011-11-17 20:13:28 -05003011 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
Alex Deucher45f9a392010-03-24 13:55:51 -04003012 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
3013 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
Alex Deucher2031f772010-04-22 12:52:11 -04003014 u32 grbm_int_cntl = 0;
Alex Deucher6f34be52010-11-21 10:59:01 -05003015 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
Alex Deucherf122c612012-03-30 08:59:57 -04003016 u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
Alex Deucherf60cbd12012-12-04 15:27:33 -05003017 u32 dma_cntl, dma_cntl1 = 0;
Alex Deucher45f9a392010-03-24 13:55:51 -04003018
3019 if (!rdev->irq.installed) {
Joe Perchesfce7d612010-10-30 21:08:30 +00003020 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
Alex Deucher45f9a392010-03-24 13:55:51 -04003021 return -EINVAL;
3022 }
3023 /* don't enable anything if the ih is disabled */
3024 if (!rdev->ih.enabled) {
3025 r600_disable_interrupts(rdev);
3026 /* force the active interrupt state to all disabled */
3027 evergreen_disable_interrupt_state(rdev);
3028 return 0;
3029 }
3030
3031 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3032 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3033 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3034 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3035 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3036 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3037
Alex Deucherf122c612012-03-30 08:59:57 -04003038 afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3039 afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3040 afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3041 afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3042 afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3043 afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3044
Alex Deucher233d1ad2012-12-04 15:25:59 -05003045 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3046
Alex Deucher1b370782011-11-17 20:13:28 -05003047 if (rdev->family >= CHIP_CAYMAN) {
3048 /* enable CP interrupts on all rings */
Christian Koenig736fc372012-05-17 19:52:00 +02003049 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
Alex Deucher1b370782011-11-17 20:13:28 -05003050 DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
3051 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3052 }
Christian Koenig736fc372012-05-17 19:52:00 +02003053 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
Alex Deucher1b370782011-11-17 20:13:28 -05003054 DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
3055 cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
3056 }
Christian Koenig736fc372012-05-17 19:52:00 +02003057 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
Alex Deucher1b370782011-11-17 20:13:28 -05003058 DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
3059 cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
3060 }
3061 } else {
Christian Koenig736fc372012-05-17 19:52:00 +02003062 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
Alex Deucher1b370782011-11-17 20:13:28 -05003063 DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
3064 cp_int_cntl |= RB_INT_ENABLE;
3065 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3066 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003067 }
Alex Deucher1b370782011-11-17 20:13:28 -05003068
Alex Deucher233d1ad2012-12-04 15:25:59 -05003069 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
 3070 DRM_DEBUG("evergreen_irq_set: sw int dma\n");
3071 dma_cntl |= TRAP_ENABLE;
3072 }
3073
Alex Deucherf60cbd12012-12-04 15:27:33 -05003074 if (rdev->family >= CHIP_CAYMAN) {
3075 dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
3076 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
 3077 DRM_DEBUG("evergreen_irq_set: sw int dma1\n");
3078 dma_cntl1 |= TRAP_ENABLE;
3079 }
3080 }
3081
Alex Deucher6f34be52010-11-21 10:59:01 -05003082 if (rdev->irq.crtc_vblank_int[0] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003083 atomic_read(&rdev->irq.pflip[0])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003084 DRM_DEBUG("evergreen_irq_set: vblank 0\n");
3085 crtc1 |= VBLANK_INT_MASK;
3086 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003087 if (rdev->irq.crtc_vblank_int[1] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003088 atomic_read(&rdev->irq.pflip[1])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003089 DRM_DEBUG("evergreen_irq_set: vblank 1\n");
3090 crtc2 |= VBLANK_INT_MASK;
3091 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003092 if (rdev->irq.crtc_vblank_int[2] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003093 atomic_read(&rdev->irq.pflip[2])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003094 DRM_DEBUG("evergreen_irq_set: vblank 2\n");
3095 crtc3 |= VBLANK_INT_MASK;
3096 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003097 if (rdev->irq.crtc_vblank_int[3] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003098 atomic_read(&rdev->irq.pflip[3])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003099 DRM_DEBUG("evergreen_irq_set: vblank 3\n");
3100 crtc4 |= VBLANK_INT_MASK;
3101 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003102 if (rdev->irq.crtc_vblank_int[4] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003103 atomic_read(&rdev->irq.pflip[4])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003104 DRM_DEBUG("evergreen_irq_set: vblank 4\n");
3105 crtc5 |= VBLANK_INT_MASK;
3106 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003107 if (rdev->irq.crtc_vblank_int[5] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003108 atomic_read(&rdev->irq.pflip[5])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003109 DRM_DEBUG("evergreen_irq_set: vblank 5\n");
3110 crtc6 |= VBLANK_INT_MASK;
3111 }
3112 if (rdev->irq.hpd[0]) {
3113 DRM_DEBUG("evergreen_irq_set: hpd 1\n");
3114 hpd1 |= DC_HPDx_INT_EN;
3115 }
3116 if (rdev->irq.hpd[1]) {
3117 DRM_DEBUG("evergreen_irq_set: hpd 2\n");
3118 hpd2 |= DC_HPDx_INT_EN;
3119 }
3120 if (rdev->irq.hpd[2]) {
3121 DRM_DEBUG("evergreen_irq_set: hpd 3\n");
3122 hpd3 |= DC_HPDx_INT_EN;
3123 }
3124 if (rdev->irq.hpd[3]) {
3125 DRM_DEBUG("evergreen_irq_set: hpd 4\n");
3126 hpd4 |= DC_HPDx_INT_EN;
3127 }
3128 if (rdev->irq.hpd[4]) {
3129 DRM_DEBUG("evergreen_irq_set: hpd 5\n");
3130 hpd5 |= DC_HPDx_INT_EN;
3131 }
3132 if (rdev->irq.hpd[5]) {
3133 DRM_DEBUG("evergreen_irq_set: hpd 6\n");
3134 hpd6 |= DC_HPDx_INT_EN;
3135 }
Alex Deucherf122c612012-03-30 08:59:57 -04003136 if (rdev->irq.afmt[0]) {
3137 DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
3138 afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3139 }
3140 if (rdev->irq.afmt[1]) {
3141 DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
3142 afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3143 }
3144 if (rdev->irq.afmt[2]) {
3145 DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
3146 afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3147 }
3148 if (rdev->irq.afmt[3]) {
3149 DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
3150 afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3151 }
3152 if (rdev->irq.afmt[4]) {
3153 DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
3154 afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3155 }
3156 if (rdev->irq.afmt[5]) {
3157 DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
3158 afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3159 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003160
Alex Deucher1b370782011-11-17 20:13:28 -05003161 if (rdev->family >= CHIP_CAYMAN) {
3162 cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
3163 cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
3164 cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
3165 } else
3166 WREG32(CP_INT_CNTL, cp_int_cntl);
Alex Deucher233d1ad2012-12-04 15:25:59 -05003167
3168 WREG32(DMA_CNTL, dma_cntl);
3169
Alex Deucherf60cbd12012-12-04 15:27:33 -05003170 if (rdev->family >= CHIP_CAYMAN)
3171 WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
3172
Alex Deucher2031f772010-04-22 12:52:11 -04003173 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
Alex Deucher45f9a392010-03-24 13:55:51 -04003174
3175 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
3176 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
Alex Deucherb7eff392011-07-08 11:44:56 -04003177 if (rdev->num_crtc >= 4) {
Alex Deucher18007402010-11-22 17:56:28 -05003178 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
3179 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
Alex Deucherb7eff392011-07-08 11:44:56 -04003180 }
3181 if (rdev->num_crtc >= 6) {
Alex Deucher18007402010-11-22 17:56:28 -05003182 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
3183 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
3184 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003185
Alex Deucher6f34be52010-11-21 10:59:01 -05003186 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
3187 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
Alex Deucherb7eff392011-07-08 11:44:56 -04003188 if (rdev->num_crtc >= 4) {
3189 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
3190 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
3191 }
3192 if (rdev->num_crtc >= 6) {
3193 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
3194 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
3195 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003196
Alex Deucher45f9a392010-03-24 13:55:51 -04003197 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3198 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3199 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3200 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3201 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3202 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3203
Alex Deucherf122c612012-03-30 08:59:57 -04003204 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
3205 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
3206 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
3207 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
3208 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
3209 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
3210
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003211 return 0;
3212}
3213
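/**
 * evergreen_irq_ack - snapshot and acknowledge display interrupts
 *
 * @rdev: radeon_device pointer
 *
 * Latch the display interrupt status registers into
 * rdev->irq.stat_regs and acknowledge any pending pageflip, vblank,
 * vline, HPD and HDMI interrupts.
 */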
Andi Kleencbdd4502011-10-13 16:08:46 -07003214static void evergreen_irq_ack(struct radeon_device *rdev)
Alex Deucher45f9a392010-03-24 13:55:51 -04003215{
3216 u32 tmp;
3217
Alex Deucher6f34be52010-11-21 10:59:01 -05003218 rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3219 rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3220 rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
3221 rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
3222 rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
3223 rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
3224 rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
3225 rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
Alex Deucherb7eff392011-07-08 11:44:56 -04003226 if (rdev->num_crtc >= 4) {
3227 rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
3228 rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
3229 }
3230 if (rdev->num_crtc >= 6) {
3231 rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
3232 rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
3233 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003234
Alex Deucherf122c612012-03-30 08:59:57 -04003235 rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
3236 rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
3237 rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
3238 rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
3239 rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
3240 rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
3241
Alex Deucher6f34be52010-11-21 10:59:01 -05003242 if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
3243 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3244 if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
3245 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
Alex Deucher6f34be52010-11-21 10:59:01 -05003246 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
Alex Deucher45f9a392010-03-24 13:55:51 -04003247 WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003248 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
Alex Deucher45f9a392010-03-24 13:55:51 -04003249 WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003250 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
Alex Deucher45f9a392010-03-24 13:55:51 -04003251 WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003252 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
Alex Deucher45f9a392010-03-24 13:55:51 -04003253 WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
3254
Alex Deucherb7eff392011-07-08 11:44:56 -04003255 if (rdev->num_crtc >= 4) {
3256 if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
3257 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3258 if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
3259 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3260 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
3261 WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
3262 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
3263 WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
3264 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
3265 WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
3266 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
3267 WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
3268 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003269
Alex Deucherb7eff392011-07-08 11:44:56 -04003270 if (rdev->num_crtc >= 6) {
3271 if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
3272 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3273 if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
3274 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3275 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
3276 WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
3277 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
3278 WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
3279 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
3280 WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
3281 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
3282 WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
3283 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003284
Alex Deucher6f34be52010-11-21 10:59:01 -05003285 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003286 tmp = RREG32(DC_HPD1_INT_CONTROL);
3287 tmp |= DC_HPDx_INT_ACK;
3288 WREG32(DC_HPD1_INT_CONTROL, tmp);
3289 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003290 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003291 tmp = RREG32(DC_HPD2_INT_CONTROL);
3292 tmp |= DC_HPDx_INT_ACK;
3293 WREG32(DC_HPD2_INT_CONTROL, tmp);
3294 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003295 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003296 tmp = RREG32(DC_HPD3_INT_CONTROL);
3297 tmp |= DC_HPDx_INT_ACK;
3298 WREG32(DC_HPD3_INT_CONTROL, tmp);
3299 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003300 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003301 tmp = RREG32(DC_HPD4_INT_CONTROL);
3302 tmp |= DC_HPDx_INT_ACK;
3303 WREG32(DC_HPD4_INT_CONTROL, tmp);
3304 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003305 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003306 tmp = RREG32(DC_HPD5_INT_CONTROL);
3307 tmp |= DC_HPDx_INT_ACK;
3308 WREG32(DC_HPD5_INT_CONTROL, tmp);
3309 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003310 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003311 tmp = RREG32(DC_HPD6_INT_CONTROL);
3312 tmp |= DC_HPDx_INT_ACK;
3313 WREG32(DC_HPD6_INT_CONTROL, tmp);
3314 }
Alex Deucherf122c612012-03-30 08:59:57 -04003315 if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
3316 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
3317 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3318 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
3319 }
3320 if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
3321 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
3322 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3323 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
3324 }
3325 if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
3326 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
3327 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3328 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
3329 }
3330 if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
3331 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
3332 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3333 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
3334 }
3335 if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
3336 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
3337 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3338 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
3339 }
3340 if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
3341 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
3342 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3343 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
3344 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003345}
3346
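/**
 * evergreen_irq_disable - disable interrupts
 *
 * @rdev: radeon_device pointer
 *
 * Disable interrupts on the hw, wait briefly, then acknowledge any
 * pending interrupts and force the enable state to all disabled.
 */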
Lauri Kasanen1109ca02012-08-31 13:43:50 -04003347static void evergreen_irq_disable(struct radeon_device *rdev)
Alex Deucher45f9a392010-03-24 13:55:51 -04003348{
Alex Deucher45f9a392010-03-24 13:55:51 -04003349 r600_disable_interrupts(rdev);
3350 /* Wait and acknowledge irq */
3351 mdelay(1);
Alex Deucher6f34be52010-11-21 10:59:01 -05003352 evergreen_irq_ack(rdev);
Alex Deucher45f9a392010-03-24 13:55:51 -04003353 evergreen_disable_interrupt_state(rdev);
3354}
3355
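/**
 * evergreen_irq_suspend - disable interrupts for suspend
 *
 * @rdev: radeon_device pointer
 *
 * Disable interrupts and stop the RLC (evergreen and newer).
 */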
Alex Deucher755d8192011-03-02 20:07:34 -05003356void evergreen_irq_suspend(struct radeon_device *rdev)
Alex Deucher45f9a392010-03-24 13:55:51 -04003357{
3358 evergreen_irq_disable(rdev);
3359 r600_rlc_stop(rdev);
3360}
3361
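/**
 * evergreen_get_ih_wptr - get the current IH ring write pointer
 *
 * @rdev: radeon_device pointer
 *
 * Read the IH ring write pointer from the writeback buffer if
 * enabled, from the register otherwise. On ring overflow, advance
 * the read pointer past the overwritten vectors and clear the
 * overflow flag.
 * Returns the masked write pointer.
 */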
Andi Kleencbdd4502011-10-13 16:08:46 -07003362static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
Alex Deucher45f9a392010-03-24 13:55:51 -04003363{
3364 u32 wptr, tmp;
3365
Alex Deucher724c80e2010-08-27 18:25:25 -04003366 if (rdev->wb.enabled)
Cédric Cano204ae242011-04-19 11:07:13 -04003367 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
Alex Deucher724c80e2010-08-27 18:25:25 -04003368 else
3369 wptr = RREG32(IH_RB_WPTR);
Alex Deucher45f9a392010-03-24 13:55:51 -04003370
3371 if (wptr & RB_OVERFLOW) {
 3372 /* When a ring buffer overflow happens, start parsing from the
 3373 * first vector that was not overwritten (wptr + 16). Hopefully
 3374 * this should allow us to catch up.
 3375 */
 3376 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
 3377 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
3378 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3379 tmp = RREG32(IH_RB_CNTL);
3380 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3381 WREG32(IH_RB_CNTL, tmp);
3382 }
3383 return (wptr & rdev->ih.ptr_mask);
3384}
3385
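/**
 * evergreen_irq_process - process the pending IH ring vectors
 *
 * @rdev: radeon_device pointer
 *
 * Walk the IH ring from rptr to wptr and dispatch each vector:
 * vblank/vline, hotplug, HDMI audio, VM faults, and CP/DMA fence
 * events. Hotplug and audio handling is deferred to work handlers.
 * Returns IRQ_NONE if the IH is disabled or already being
 * processed, IRQ_HANDLED otherwise.
 */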
3386int evergreen_irq_process(struct radeon_device *rdev)
3387{
Dave Airlie682f1a52011-06-18 03:59:51 +00003388 u32 wptr;
3389 u32 rptr;
Alex Deucher45f9a392010-03-24 13:55:51 -04003390 u32 src_id, src_data;
3391 u32 ring_index;
Alex Deucher45f9a392010-03-24 13:55:51 -04003392 bool queue_hotplug = false;
Alex Deucherf122c612012-03-30 08:59:57 -04003393 bool queue_hdmi = false;
Alex Deucher45f9a392010-03-24 13:55:51 -04003394
Dave Airlie682f1a52011-06-18 03:59:51 +00003395 if (!rdev->ih.enabled || rdev->shutdown)
Alex Deucher45f9a392010-03-24 13:55:51 -04003396 return IRQ_NONE;
3397
Dave Airlie682f1a52011-06-18 03:59:51 +00003398 wptr = evergreen_get_ih_wptr(rdev);
Christian Koenigc20dc362012-05-16 21:45:24 +02003399
3400restart_ih:
3401 /* is somebody else already processing irqs? */
3402 if (atomic_xchg(&rdev->ih.lock, 1))
3403 return IRQ_NONE;
3404
Dave Airlie682f1a52011-06-18 03:59:51 +00003405 rptr = rdev->ih.rptr;
3406 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
Alex Deucher45f9a392010-03-24 13:55:51 -04003407
Benjamin Herrenschmidt964f6642011-07-13 16:28:19 +10003408 /* Order reading of wptr vs. reading of IH ring data */
3409 rmb();
3410
Alex Deucher45f9a392010-03-24 13:55:51 -04003411 /* display interrupts */
Alex Deucher6f34be52010-11-21 10:59:01 -05003412 evergreen_irq_ack(rdev);
Alex Deucher45f9a392010-03-24 13:55:51 -04003413
Alex Deucher45f9a392010-03-24 13:55:51 -04003414 while (rptr != wptr) {
3415 /* wptr/rptr are in bytes! */
3416 ring_index = rptr / 4;
Alex Deucher0f234f5f2011-02-13 19:06:33 -05003417 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3418 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
Alex Deucher45f9a392010-03-24 13:55:51 -04003419
3420 switch (src_id) {
3421 case 1: /* D1 vblank/vline */
3422 switch (src_data) {
3423 case 0: /* D1 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003424 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
Alex Deucher6f34be52010-11-21 10:59:01 -05003425 if (rdev->irq.crtc_vblank_int[0]) {
3426 drm_handle_vblank(rdev->ddev, 0);
3427 rdev->pm.vblank_sync = true;
3428 wake_up(&rdev->irq.vblank_queue);
3429 }
Christian Koenig736fc372012-05-17 19:52:00 +02003430 if (atomic_read(&rdev->irq.pflip[0]))
Mario Kleiner3e4ea742010-11-21 10:59:02 -05003431 radeon_crtc_handle_flip(rdev, 0);
Alex Deucher6f34be52010-11-21 10:59:01 -05003432 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003433 DRM_DEBUG("IH: D1 vblank\n");
3434 }
3435 break;
3436 case 1: /* D1 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003437 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
3438 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003439 DRM_DEBUG("IH: D1 vline\n");
3440 }
3441 break;
3442 default:
3443 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3444 break;
3445 }
3446 break;
3447 case 2: /* D2 vblank/vline */
3448 switch (src_data) {
3449 case 0: /* D2 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003450 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
Alex Deucher6f34be52010-11-21 10:59:01 -05003451 if (rdev->irq.crtc_vblank_int[1]) {
3452 drm_handle_vblank(rdev->ddev, 1);
3453 rdev->pm.vblank_sync = true;
3454 wake_up(&rdev->irq.vblank_queue);
3455 }
Christian Koenig736fc372012-05-17 19:52:00 +02003456 if (atomic_read(&rdev->irq.pflip[1]))
Mario Kleiner3e4ea742010-11-21 10:59:02 -05003457 radeon_crtc_handle_flip(rdev, 1);
Alex Deucher6f34be52010-11-21 10:59:01 -05003458 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003459 DRM_DEBUG("IH: D2 vblank\n");
3460 }
3461 break;
3462 case 1: /* D2 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003463 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
3464 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003465 DRM_DEBUG("IH: D2 vline\n");
3466 }
3467 break;
3468 default:
3469 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3470 break;
3471 }
3472 break;
3473 case 3: /* D3 vblank/vline */
3474 switch (src_data) {
3475 case 0: /* D3 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003476 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
3477 if (rdev->irq.crtc_vblank_int[2]) {
3478 drm_handle_vblank(rdev->ddev, 2);
3479 rdev->pm.vblank_sync = true;
3480 wake_up(&rdev->irq.vblank_queue);
3481 }
Christian Koenig736fc372012-05-17 19:52:00 +02003482 if (atomic_read(&rdev->irq.pflip[2]))
Alex Deucher6f34be52010-11-21 10:59:01 -05003483 radeon_crtc_handle_flip(rdev, 2);
3484 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003485 DRM_DEBUG("IH: D3 vblank\n");
3486 }
3487 break;
3488 case 1: /* D3 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003489 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
3490 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003491 DRM_DEBUG("IH: D3 vline\n");
3492 }
3493 break;
3494 default:
3495 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3496 break;
3497 }
3498 break;
3499 case 4: /* D4 vblank/vline */
3500 switch (src_data) {
3501 case 0: /* D4 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003502 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
3503 if (rdev->irq.crtc_vblank_int[3]) {
3504 drm_handle_vblank(rdev->ddev, 3);
3505 rdev->pm.vblank_sync = true;
3506 wake_up(&rdev->irq.vblank_queue);
3507 }
Christian Koenig736fc372012-05-17 19:52:00 +02003508 if (atomic_read(&rdev->irq.pflip[3]))
Alex Deucher6f34be52010-11-21 10:59:01 -05003509 radeon_crtc_handle_flip(rdev, 3);
3510 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003511 DRM_DEBUG("IH: D4 vblank\n");
3512 }
3513 break;
3514 case 1: /* D4 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003515 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
3516 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003517 DRM_DEBUG("IH: D4 vline\n");
3518 }
3519 break;
3520 default:
3521 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3522 break;
3523 }
3524 break;
3525 case 5: /* D5 vblank/vline */
3526 switch (src_data) {
3527 case 0: /* D5 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003528 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
3529 if (rdev->irq.crtc_vblank_int[4]) {
3530 drm_handle_vblank(rdev->ddev, 4);
3531 rdev->pm.vblank_sync = true;
3532 wake_up(&rdev->irq.vblank_queue);
3533 }
Christian Koenig736fc372012-05-17 19:52:00 +02003534 if (atomic_read(&rdev->irq.pflip[4]))
Alex Deucher6f34be52010-11-21 10:59:01 -05003535 radeon_crtc_handle_flip(rdev, 4);
3536 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003537 DRM_DEBUG("IH: D5 vblank\n");
3538 }
3539 break;
3540 case 1: /* D5 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003541 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
3542 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003543 DRM_DEBUG("IH: D5 vline\n");
3544 }
3545 break;
3546 default:
3547 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3548 break;
3549 }
3550 break;
3551 case 6: /* D6 vblank/vline */
3552 switch (src_data) {
3553 case 0: /* D6 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003554 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
3555 if (rdev->irq.crtc_vblank_int[5]) {
3556 drm_handle_vblank(rdev->ddev, 5);
3557 rdev->pm.vblank_sync = true;
3558 wake_up(&rdev->irq.vblank_queue);
3559 }
Christian Koenig736fc372012-05-17 19:52:00 +02003560 if (atomic_read(&rdev->irq.pflip[5]))
Alex Deucher6f34be52010-11-21 10:59:01 -05003561 radeon_crtc_handle_flip(rdev, 5);
3562 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003563 DRM_DEBUG("IH: D6 vblank\n");
3564 }
3565 break;
3566 case 1: /* D6 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003567 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
3568 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003569 DRM_DEBUG("IH: D6 vline\n");
3570 }
3571 break;
3572 default:
3573 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3574 break;
3575 }
3576 break;
3577 case 42: /* HPD hotplug */
3578 switch (src_data) {
3579 case 0:
Alex Deucher6f34be52010-11-21 10:59:01 -05003580 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
3581 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003582 queue_hotplug = true;
3583 DRM_DEBUG("IH: HPD1\n");
3584 }
3585 break;
3586 case 1:
Alex Deucher6f34be52010-11-21 10:59:01 -05003587 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
3588 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003589 queue_hotplug = true;
3590 DRM_DEBUG("IH: HPD2\n");
3591 }
3592 break;
3593 case 2:
Alex Deucher6f34be52010-11-21 10:59:01 -05003594 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
3595 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003596 queue_hotplug = true;
3597 DRM_DEBUG("IH: HPD3\n");
3598 }
3599 break;
3600 case 3:
Alex Deucher6f34be52010-11-21 10:59:01 -05003601 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
3602 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003603 queue_hotplug = true;
3604 DRM_DEBUG("IH: HPD4\n");
3605 }
3606 break;
3607 case 4:
Alex Deucher6f34be52010-11-21 10:59:01 -05003608 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
3609 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003610 queue_hotplug = true;
3611 DRM_DEBUG("IH: HPD5\n");
3612 }
3613 break;
3614 case 5:
Alex Deucher6f34be52010-11-21 10:59:01 -05003615 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
3616 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003617 queue_hotplug = true;
3618 DRM_DEBUG("IH: HPD6\n");
3619 }
3620 break;
3621 default:
3622 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3623 break;
3624 }
3625 break;
Alex Deucherf122c612012-03-30 08:59:57 -04003626 case 44: /* hdmi */
3627 switch (src_data) {
3628 case 0:
3629 if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
3630 rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
3631 queue_hdmi = true;
3632 DRM_DEBUG("IH: HDMI0\n");
3633 }
3634 break;
3635 case 1:
3636 if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
3637 rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
3638 queue_hdmi = true;
3639 DRM_DEBUG("IH: HDMI1\n");
3640 }
3641 break;
3642 case 2:
3643 if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
3644 rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
3645 queue_hdmi = true;
3646 DRM_DEBUG("IH: HDMI2\n");
3647 }
3648 break;
3649 case 3:
3650 if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
3651 rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
3652 queue_hdmi = true;
3653 DRM_DEBUG("IH: HDMI3\n");
3654 }
3655 break;
3656 case 4:
3657 if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
3658 rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
3659 queue_hdmi = true;
3660 DRM_DEBUG("IH: HDMI4\n");
3661 }
3662 break;
3663 case 5:
3664 if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
3665 rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
3666 queue_hdmi = true;
3667 DRM_DEBUG("IH: HDMI5\n");
3668 }
3669 break;
3670 default:
3671 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
3672 break;
 3673 }
			break;
Christian Königf2ba57b2013-04-08 12:41:29 +02003674 case 124: /* UVD */
3675 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
3676 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
Alex Deucherf122c612012-03-30 08:59:57 -04003677 break;
Christian Königae133a12012-09-18 15:30:44 -04003678 case 146:
3679 case 147:
3680 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
3681 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
3682 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
3683 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3684 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
3685 /* reset addr and status */
3686 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
3687 break;
Alex Deucher45f9a392010-03-24 13:55:51 -04003688 case 176: /* CP_INT in ring buffer */
3689 case 177: /* CP_INT in IB1 */
3690 case 178: /* CP_INT in IB2 */
3691 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
Alex Deucher74652802011-08-25 13:39:48 -04003692 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
Alex Deucher45f9a392010-03-24 13:55:51 -04003693 break;
3694 case 181: /* CP EOP event */
3695 DRM_DEBUG("IH: CP EOP\n");
Alex Deucher1b370782011-11-17 20:13:28 -05003696 if (rdev->family >= CHIP_CAYMAN) {
3697 switch (src_data) {
3698 case 0:
3699 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3700 break;
3701 case 1:
3702 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
3703 break;
3704 case 2:
3705 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
3706 break;
3707 }
3708 } else
3709 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
Alex Deucher45f9a392010-03-24 13:55:51 -04003710 break;
Alex Deucher233d1ad2012-12-04 15:25:59 -05003711 case 224: /* DMA trap event */
3712 DRM_DEBUG("IH: DMA trap\n");
3713 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
3714 break;
Alex Deucher2031f772010-04-22 12:52:11 -04003715 case 233: /* GUI IDLE */
Ilija Hadzic303c8052011-06-07 14:54:48 -04003716 DRM_DEBUG("IH: GUI idle\n");
Alex Deucher2031f772010-04-22 12:52:11 -04003717 break;
Alex Deucherf60cbd12012-12-04 15:27:33 -05003718 case 244: /* DMA1 trap event */
3719 if (rdev->family >= CHIP_CAYMAN) {
3720 DRM_DEBUG("IH: DMA1 trap\n");
3721 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
3722 }
3723 break;
Alex Deucher45f9a392010-03-24 13:55:51 -04003724 default:
3725 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3726 break;
3727 }
3728
3729 /* wptr/rptr are in bytes! */
3730 rptr += 16;
3731 rptr &= rdev->ih.ptr_mask;
3732 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003733 if (queue_hotplug)
Tejun Heo32c87fc2011-01-03 14:49:32 +01003734 schedule_work(&rdev->hotplug_work);
Alex Deucherf122c612012-03-30 08:59:57 -04003735 if (queue_hdmi)
3736 schedule_work(&rdev->audio_work);
Alex Deucher45f9a392010-03-24 13:55:51 -04003737 rdev->ih.rptr = rptr;
3738 WREG32(IH_RB_RPTR, rdev->ih.rptr);
Christian Koenigc20dc362012-05-16 21:45:24 +02003739 atomic_set(&rdev->ih.lock, 0);
3740
3741 /* make sure wptr hasn't changed while processing */
3742 wptr = evergreen_get_ih_wptr(rdev);
3743 if (wptr != rptr)
3744 goto restart_ih;
3745
Alex Deucher45f9a392010-03-24 13:55:51 -04003746 return IRQ_HANDLED;
3747}
3748
Alex Deucher233d1ad2012-12-04 15:25:59 -05003749/**
3750 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
3751 *
3752 * @rdev: radeon_device pointer
3753 * @fence: radeon fence object
3754 *
3755 * Add a DMA fence packet to the ring to write
3756 * the fence seq number and DMA trap packet to generate
3757 * an interrupt if needed (evergreen-SI).
3758 */
3759void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
3760 struct radeon_fence *fence)
3761{
3762 struct radeon_ring *ring = &rdev->ring[fence->ring];
3763 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3764 /* write the fence */
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003765 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003766 radeon_ring_write(ring, addr & 0xfffffffc);
3767 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
3768 radeon_ring_write(ring, fence->seq);
3769 /* generate an interrupt */
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003770 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003771 /* flush HDP */
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003772 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
Alex Deucher4b681c22013-01-03 19:54:34 -05003773 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003774 radeon_ring_write(ring, 1);
3775}
3776
3777/**
3778 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
3779 *
3780 * @rdev: radeon_device pointer
3781 * @ib: IB object to schedule
3782 *
3783 * Schedule an IB in the DMA ring (evergreen).
3784 */
3785void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
3786 struct radeon_ib *ib)
3787{
3788 struct radeon_ring *ring = &rdev->ring[ib->ring];
3789
3790 if (rdev->wb.enabled) {
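		/* next_rptr: ring position after this 4-DW write packet,
		 * the NOP padding, and the 3-DW indirect buffer packet
		 * emitted below.
		 */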
3791 u32 next_rptr = ring->wptr + 4;
3792 while ((next_rptr & 7) != 5)
3793 next_rptr++;
3794 next_rptr += 3;
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003795 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003796 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3797 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
3798 radeon_ring_write(ring, next_rptr);
3799 }
3800
3801 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
3802 * Pad as necessary with NOPs.
3803 */
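	/* e.g. an INDIRECT_BUFFER packet (3 DW) started at
	 * (wptr & 7) == 5 ends on an 8-DW boundary: 5 + 3 = 8.
	 */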
3804 while ((ring->wptr & 7) != 5)
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003805 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
3806 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003807 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
3808 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
 3810}
3811
3812/**
3813 * evergreen_copy_dma - copy pages using the DMA engine
3814 *
3815 * @rdev: radeon_device pointer
3816 * @src_offset: src GPU address
3817 * @dst_offset: dst GPU address
3818 * @num_gpu_pages: number of GPU pages to xfer
3819 * @fence: radeon fence object
3820 *
 3821 * Copy GPU pages using the DMA engine (evergreen-cayman).
3822 * Used by the radeon ttm implementation to move pages if
3823 * registered as the asic copy callback.
3824 */
3825int evergreen_copy_dma(struct radeon_device *rdev,
3826 uint64_t src_offset, uint64_t dst_offset,
3827 unsigned num_gpu_pages,
3828 struct radeon_fence **fence)
3829{
3830 struct radeon_semaphore *sem = NULL;
3831 int ring_index = rdev->asic->copy.dma_ring_index;
3832 struct radeon_ring *ring = &rdev->ring[ring_index];
3833 u32 size_in_dw, cur_size_in_dw;
3834 int i, num_loops;
3835 int r = 0;
3836
3837 r = radeon_semaphore_create(rdev, &sem);
3838 if (r) {
3839 DRM_ERROR("radeon: moving bo (%d).\n", r);
3840 return r;
3841 }
3842
3843 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
3844 num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
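	/* 5 DW per copy packet, plus headroom for the optional
	 * semaphore sync and the fence emitted below.
	 */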
3845 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
3846 if (r) {
3847 DRM_ERROR("radeon: moving bo (%d).\n", r);
3848 radeon_semaphore_free(rdev, &sem, NULL);
3849 return r;
3850 }
3851
3852 if (radeon_fence_need_sync(*fence, ring->idx)) {
3853 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3854 ring->idx);
3855 radeon_fence_note_sync(*fence, ring->idx);
3856 } else {
3857 radeon_semaphore_free(rdev, &sem, NULL);
3858 }
3859
3860 for (i = 0; i < num_loops; i++) {
3861 cur_size_in_dw = size_in_dw;
3862 if (cur_size_in_dw > 0xFFFFF)
3863 cur_size_in_dw = 0xFFFFF;
3864 size_in_dw -= cur_size_in_dw;
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003865 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003866 radeon_ring_write(ring, dst_offset & 0xfffffffc);
3867 radeon_ring_write(ring, src_offset & 0xfffffffc);
3868 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
3869 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
3870 src_offset += cur_size_in_dw * 4;
3871 dst_offset += cur_size_in_dw * 4;
3872 }
3873
3874 r = radeon_fence_emit(rdev, fence, ring->idx);
3875 if (r) {
3876 radeon_ring_unlock_undo(rdev, ring);
3877 return r;
3878 }
3879
3880 radeon_ring_unlock_commit(rdev, ring);
3881 radeon_semaphore_free(rdev, &sem, *fence);
3882
3883 return r;
3884}
3885
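/**
 * evergreen_startup - start up the GPU
 *
 * @rdev: radeon_device pointer
 *
 * Load the microcode, program the MC and GART, initialize the
 * rings, interrupts, writeback, blitter, UVD, IB pool and audio.
 * Returns 0 on success, a negative error code on failure.
 */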
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003886static int evergreen_startup(struct radeon_device *rdev)
3887{
Christian Königf2ba57b2013-04-08 12:41:29 +02003888 struct radeon_ring *ring;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003889 int r;
3890
Alex Deucher9e46a482011-01-06 18:49:35 -05003891 /* enable pcie gen2 link */
Ilija Hadziccd540332011-09-20 10:22:57 -04003892 evergreen_pcie_gen2_enable(rdev);
Alex Deucher9e46a482011-01-06 18:49:35 -05003893
Alex Deucher0af62b02011-01-06 21:19:31 -05003894 if (ASIC_IS_DCE5(rdev)) {
3895 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
3896 r = ni_init_microcode(rdev);
3897 if (r) {
3898 DRM_ERROR("Failed to load firmware!\n");
3899 return r;
3900 }
3901 }
Alex Deucher755d8192011-03-02 20:07:34 -05003902 r = ni_mc_load_microcode(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003903 if (r) {
Alex Deucher0af62b02011-01-06 21:19:31 -05003904 DRM_ERROR("Failed to load MC firmware!\n");
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003905 return r;
3906 }
Alex Deucher0af62b02011-01-06 21:19:31 -05003907 } else {
3908 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
3909 r = r600_init_microcode(rdev);
3910 if (r) {
3911 DRM_ERROR("Failed to load firmware!\n");
3912 return r;
3913 }
3914 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003915 }
Alex Deucherfe251e22010-03-24 13:36:43 -04003916
Alex Deucher16cdf042011-10-28 10:30:02 -04003917 r = r600_vram_scratch_init(rdev);
3918 if (r)
3919 return r;
3920
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003921 evergreen_mc_program(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003922 if (rdev->flags & RADEON_IS_AGP) {
Alex Deucher0fcdb612010-03-24 13:20:41 -04003923 evergreen_agp_enable(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003924 } else {
3925 r = evergreen_pcie_gart_enable(rdev);
3926 if (r)
3927 return r;
3928 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003929 evergreen_gpu_init(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003930
Alex Deucherd7ccd8f2010-09-09 11:33:36 -04003931 r = evergreen_blit_init(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003932 if (r) {
Ilija Hadzicfb3d9e92011-10-12 23:29:41 -04003933 r600_blit_fini(rdev);
Alex Deucher27cd7762012-02-23 17:53:42 -05003934 rdev->asic->copy.copy = NULL;
Alex Deucherd7ccd8f2010-09-09 11:33:36 -04003935 dev_warn(rdev->dev, "failed blitter (%d), falling back to memcpy\n", r);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003936 }
3937
Alex Deucher724c80e2010-08-27 18:25:25 -04003938 /* allocate wb buffer */
3939 r = radeon_wb_init(rdev);
3940 if (r)
3941 return r;
3942
Jerome Glisse30eb77f2011-11-20 20:45:34 +00003943 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3944 if (r) {
3945 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3946 return r;
3947 }
3948
Alex Deucher233d1ad2012-12-04 15:25:59 -05003949 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
3950 if (r) {
3951 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
3952 return r;
3953 }
3954
	r = rv770_uvd_resume(rdev);
	if (!r) {
		r = radeon_fence_driver_start_ring(rdev,
						   R600_RING_TYPE_UVD_INDEX);
		if (r)
			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
	}

	if (r)
		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

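	/* initialize the GFX and DMA ring buffers, then start the engines */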
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_RB_RPTR, DMA_RB_WPTR,
			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
	if (r)
		return r;

	r = evergreen_cp_load_microcode(rdev);
	if (r)
		return r;
	r = evergreen_cp_resume(rdev);
	if (r)
		return r;
	r = r600_dma_resume(rdev);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	if (ring->ring_size) {
		r = radeon_ring_init(rdev, ring, ring->ring_size,
				     R600_WB_UVD_RPTR_OFFSET,
				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
				     0, 0xfffff, RADEON_CP_PACKET2);
		if (!r)
			r = r600_uvd_init(rdev);

		if (r)
			DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
	}

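	/* allocate the IB pool used for indirect buffer submissions */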
	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}

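/**
 * evergreen_resume - resume the asic to a functional state
 *
 * @rdev: radeon_device pointer
 *
 * Reset and re-post the asic, then bring it back up via
 * evergreen_startup().
 * Returns 0 for success, error for failure.
 */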
int evergreen_resume(struct radeon_device *rdev)
{
	int r;

	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	/* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back
	 * into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		DRM_ERROR("evergreen startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}

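/**
 * evergreen_suspend - suspend the asic
 *
 * @rdev: radeon_device pointer
 *
 * Tear down audio and UVD, stop the CP, DMA and UVD rings, and
 * disable interrupts, writeback and the GART.
 * Returns 0 for success.
 */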
4055int evergreen_suspend(struct radeon_device *rdev)
4056{
Rafał Miłecki69d2ae52011-12-07 23:32:24 +01004057 r600_audio_fini(rdev);
Christian Königf2ba57b2013-04-08 12:41:29 +02004058 radeon_uvd_suspend(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05004059 r700_cp_stop(rdev);
Alex Deucher233d1ad2012-12-04 15:25:59 -05004060 r600_dma_stop(rdev);
Christian Königf2ba57b2013-04-08 12:41:29 +02004061 r600_uvd_rbc_stop(rdev);
Alex Deucher45f9a392010-03-24 13:55:51 -04004062 evergreen_irq_suspend(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04004063 radeon_wb_disable(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05004064 evergreen_pcie_gart_disable(rdev);
Alex Deucherd7ccd8f2010-09-09 11:33:36 -04004065
4066 return 0;
4067}
4068
/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does little more than
 * call asic-specific functions. This should also allow us to remove
 * a bunch of callback functions like vram_info.
 */
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

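	/* set up the ring buffers: 1MB for GFX, 64KB for DMA,
	 * 4KB for UVD (if present) and 64KB for the IH ring
	 */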
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);

	r = radeon_uvd_init(rdev);
	if (!r) {
		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
		r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
			       4096);
	}

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_dma_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		evergreen_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing on BTC parts.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
			DRM_ERROR("radeon: MC ucode required for NI+.\n");
			return -EINVAL;
		}
	}

	return 0;
}

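/**
 * evergreen_fini - tear down the driver for the asic
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the blitter, rings, interrupts, writeback, IB pool, GART,
 * UVD and the rest of the driver state, then free the BIOS copy.
 */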
void evergreen_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r700_cp_fini(rdev);
	r600_dma_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	evergreen_pcie_gart_fini(rdev);
	radeon_uvd_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

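/**
 * evergreen_pcie_gen2_enable - enable pcie gen2 link speeds
 *
 * @rdev: radeon_device pointer
 *
 * Switch the PCIE link to gen 2 speeds when both the chip and the
 * bridge support it; does nothing on IGPs, non-PCIE parts, X2 cards,
 * or when disabled via radeon.pcie_gen2=0.
 */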
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, speed_cntl, mask;
	int ret;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & DRM_PCIE_SPEED_50))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}