/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"

#include "rs600_reg_safe.h"

/* rs600 depends on: */
void r100_hdp_reset(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r300_mc_wait_for_idle(struct radeon_device *rdev);
void r420_pipes_init(struct radeon_device *rdev);

/* This file gathers functions specific to:
 * rs600
 *
 * Some of these functions might be used by newer ASICs.
 */
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
void rs600_disable_vga(struct radeon_device *rdev);


/*
 * GART.
 */
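/* Flush the GART TLB by toggling the L1 TLB / L2 cache invalidate bits in
 * MC_PT0_CNTL off, on, then off again through the indirect MC accessors;
 * the trailing read is presumably there to make sure the last write has
 * posted before translation is used again.
 */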
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
	tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
	WREG32_MC(RS600_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
	tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
	WREG32_MC(RS600_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
	tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
	WREG32_MC(RS600_MC_PT0_CNTL, tmp);
	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
}

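/* Allocate the common GART structure and the page table backing store.
 * Each of num_gpu_pages entries is an 8-byte PTE, hence the table size of
 * num_gpu_pages * 8; the table itself lives in a VRAM object.
 */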
int rs600_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "RS600 GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

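/* Program and enable the GART: pin the page table object, set up the
 * per-client translation controls, map the system aperture over the GTT
 * range, point flat context 0 at the table, then turn the page tables on
 * and flush the TLB.
 */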
int rs600_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* FIXME: setup default page */
	WREG32_MC(RS600_MC_PT0_CNTL,
		  (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
		   RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
	for (i = 0; i < 19; i++) {
		WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i,
			  (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
			   RS600_SYSTEM_ACCESS_MODE_IN_SYS |
			   RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE |
			   RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
			   RS600_ENABLE_FRAGMENT_PROCESSING |
			   RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
	}

	/* System context map to GART space */
	WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp);

	/* enable first context */
	WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp);
	WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL,
		  (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT));
	/* disable all other contexts */
	for (i = 1; i < 8; i++) {
		WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
	}

	/* setup the page table */
	WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		  rdev->gart.table_addr);
	WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

	/* enable page tables */
	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
	WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT));
	tmp = RREG32_MC(RS600_MC_CNTL1);
	WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES));
	rs600_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

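/* Turn translation back off and drop the kernel mapping and pin of the
 * page table object if one was allocated.
 */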
void rs600_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* FIXME: disable out-of-GART access */
	WREG32_MC(RS600_MC_PT0_CNTL, 0);
	tmp = RREG32_MC(RS600_MC_CNTL1);
	tmp &= ~RS600_ENABLE_PAGE_TABLES;
	WREG32_MC(RS600_MC_CNTL1, tmp);
	if (rdev->gart.table.vram.robj) {
		radeon_object_kunmap(rdev->gart.table.vram.robj);
		radeon_object_unpin(rdev->gart.table.vram.robj);
	}
}

void rs600_gart_fini(struct radeon_device *rdev)
{
	rs600_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

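/* GART page table entry layout used below: the low 12 bits carry the
 * valid/system/snooped/readable/writeable flags, the rest is the 4KB
 * aligned system page address. rs600_gart_set_page() ORs the flags into
 * the address and writes the 64-bit entry straight into the table.
 */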
#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = addr & 0xFFFFFFFFFFFFF000ULL;
	addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
	addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
	writeq(addr, ((void __iomem *)ptr) + (i * 8));
	return 0;
}


/*
 * MC.
 */
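/* Shut off the D1/D2 VGA and CRTC display clients so nothing touches
 * memory while the MC apertures are reprogrammed; the trailing register
 * read and mdelay() give the disables time to land.
 */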
void rs600_mc_disable_clients(struct radeon_device *rdev)
{
	unsigned tmp;

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(AVIVO_D1VGA_CONTROL);
	WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
	tmp = RREG32(AVIVO_D2VGA_CONTROL);
	WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);

	tmp = RREG32(AVIVO_D1CRTC_CONTROL);
	WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
	tmp = RREG32(AVIVO_D2CRTC_CONTROL);
	WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);

	/* make sure all previous writes got through */
	tmp = RREG32(AVIVO_D2CRTC_CONTROL);

	mdelay(1);
}

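/* Set up the GPU memory controller: let radeon_mc_setup() pick the VRAM
 * and GTT locations, enable bus mastering, clear the AGP aperture
 * registers (see the FIXMEs below), then program the FB location
 * registers once the display clients and the MC are idle.
 */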
int rs600_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}

	rs600_gpu_init(rdev);
	rs600_gart_disable(rdev);

	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	/* Enable bus master */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* FIXME: What does AGP mean for such a chipset? */
	WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF);
	/* FIXME: are these AGP regs in the indirect MC range? */
	WREG32_MC(RS600_MC_AGP_BASE, 0);
	WREG32_MC(RS600_MC_AGP_BASE_2, 0);
	rs600_mc_disable_clients(rdev);
	if (rs600_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32_MC(RS600_MC_FB_LOCATION, tmp);
	WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
	return 0;
}

void rs600_mc_fini(struct radeon_device *rdev)
{
}


/*
 * Interrupts
 */
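/* Build the interrupt enable masks from the requested sources: the
 * software interrupt enable and the display interrupt bit go into
 * GEN_INT_CNTL, the per-CRTC vblank masks into AVIVO_DxMODE_INT_MASK.
 */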
int rs600_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;
	uint32_t mode_int = 0;

	if (rdev->irq.sw_int) {
		tmp |= RADEON_SW_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		tmp |= AVIVO_DISPLAY_INT_STATUS;
		mode_int |= AVIVO_D1MODE_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		tmp |= AVIVO_DISPLAY_INT_STATUS;
		mode_int |= AVIVO_D2MODE_INT_MASK;
	}
	WREG32(RADEON_GEN_INT_CNTL, tmp);
	WREG32(AVIVO_DxMODE_INT_MASK, mode_int);
	return 0;
}

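/* Read and acknowledge pending interrupts: latch the display interrupt
 * status, write the ACK bits for any D1/D2 vblank that fired, write the
 * pending bits back to GEN_INT_STATUS, and return the masked SW
 * interrupt state.
 */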
static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
{
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST;

	if (irqs & AVIVO_DISPLAY_INT_STATUS) {
		*r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS);
		if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) {
			WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK);
		}
		if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
			WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK);
		}
	} else {
		*r500_disp_int = 0;
	}

	if (irqs) {
		WREG32(RADEON_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

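/* Interrupt handler proper: keep acking and dispatching until no source
 * is left pending. SW interrupts drive fence processing; D1/D2 vblanks
 * are forwarded to drm_handle_vblank().
 */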
int rs600_irq_process(struct radeon_device *rdev)
{
	uint32_t status;
	uint32_t r500_disp_int;

	status = rs600_irq_ack(rdev, &r500_disp_int);
	if (!status && !r500_disp_int) {
		return IRQ_NONE;
	}
	while (status || r500_disp_int) {
		/* SW interrupt */
		if (status & RADEON_SW_INT_TEST) {
			radeon_fence_process(rdev);
		}
		/* Vertical blank interrupts */
		if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) {
			drm_handle_vblank(rdev->ddev, 0);
		}
		if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
			drm_handle_vblank(rdev->ddev, 1);
		}
		status = rs600_irq_ack(rdev, &r500_disp_int);
	}
	return IRQ_HANDLED;
}

u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(AVIVO_D1CRTC_FRAME_COUNT);
	else
		return RREG32(AVIVO_D2CRTC_FRAME_COUNT);
}


/*
 * Global GPU functions
 */
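/* Disable the legacy VGA rendering path. The raw offsets written here
 * (0x300, 0x308, 0x310, 0x330, 0x338, 0x594) have no named defines in
 * this file; they appear to be the AVIVO VGA render/memory control
 * registers, with 0x310 repointed at the start of VRAM.
 */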
void rs600_disable_vga(struct radeon_device *rdev)
{
	unsigned tmp;

	WREG32(0x330, 0);
	WREG32(0x338, 0);
	tmp = RREG32(0x300);
	tmp &= ~(3 << 16);
	WREG32(0x300, tmp);
	WREG32(0x308, (1 << 8));
	WREG32(0x310, rdev->mc.vram_location);
	WREG32(0x594, 0);
}

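/* Poll MC_STATUS for the idle bit, one microsecond at a time, for up to
 * rdev->usec_timeout iterations; returns 0 on idle, -1 on timeout.
 */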
int rs600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32_MC(RS600_MC_STATUS);
		if (tmp & RS600_MC_STATUS_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void rs600_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}

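/* Basic engine setup: reset HDP, disable VGA, reuse the r420 pipe setup
 * (assumed rather than verified for rs600, see the FIXMEs below) and
 * wait for the memory controller to go idle.
 */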
void rs600_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: HDP same place on rs600? */
	r100_hdp_reset(rdev);
	rs600_disable_vga(rdev);
	/* FIXME: is this correct? */
	r420_pipes_init(rdev);
	if (rs600_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}


/*
 * VRAM info.
 */
void rs600_vram_info(struct radeon_device *rdev)
{
	/* FIXME: to do, or are these values sane? */
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
}

void rs600_bandwidth_update(struct radeon_device *rdev)
{
	/* FIXME: implement, should this be like rs690? */
}


/*
 * Indirect register accessors
 */
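/* MC registers sit behind an index/data pair: the register offset (plus
 * the CITF_ARB0 select) goes into MC_INDEX, the payload is then read from
 * or written to MC_DATA, with MC_IND_WR_EN set for writes.
 */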
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(RS600_MC_INDEX,
	       ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0));
	r = RREG32(RS600_MC_DATA);
	return r;
}

void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(RS600_MC_INDEX,
	       RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 |
	       ((reg) & RS600_MC_ADDR_MASK));
	WREG32(RS600_MC_DATA, v);
}

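/* Minimal init hook at this stage: just point the CS checker at the
 * register-safe bitmap generated into rs600_reg_safe.h, stored in the
 * shared r300 config struct.
 */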
int rs600_init(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
	return 0;
}