/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_drm.h"
#include "r100_track.h"
#include "r300d.h"
#include "rv350d.h"
#include "r300_reg_safe.h"

/* This file gathers functions specific to: r300, r350, rv350, rv370, rv380
 *
 * GPU Errata:
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   over MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
 *   However, scheduling such a write on the ring seems harmless; I suspect
 *   the CP read collides with the flush somehow, or maybe with the MC, hard
 *   to tell. (Jerome Glisse)
 */

/*
 * rv370,rv380 PCIE GART
 */
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

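/* Invalidate the PCIE GART TLB so the GPU sees updated page table
 * entries (done twice to work around a hardware bug). */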
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround for a HW bug: do the flush 2 times */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	}
	mb();
}

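/* Write a single PCIE GART page table entry.  The 40-bit bus address is
 * packed into a 32-bit entry (bits 39:32 in the top byte, bits 31:8
 * below); the low bits 0xc presumably mark the entry valid. */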
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = (lower_32_bits(addr) >> 8) |
	       ((upper_32_bits(addr) & 0xff) << 24) |
	       0xc;
	/* On x86 we want this to be CPU endian; on powerpc
	 * without HW swappers it'll get swapped on the way
	 * into VRAM - so no need for cpu_to_le32 on VRAM tables. */
	writel(addr, ((void __iomem *)ptr) + (i * 4));
	return 0;
}

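/* One-time PCIE GART setup: initialize the common GART structure, hook
 * up the rv370 TLB-flush and set-page callbacks, and allocate the page
 * table in VRAM (4 bytes per entry). */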
int rv370_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "RV370 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r)
		DRM_ERROR("Failed to register debugfs file for PCIE gart!\n");
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
	rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
	return radeon_gart_table_vram_alloc(rdev);
}

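/* Program the GART aperture to cover the GTT range, point the hardware
 * at the page table in VRAM, then enable translation and flush the TLB.
 * Accesses outside the mapped range are discarded, with reads redirected
 * to an address at the start of VRAM. */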
int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* discard memory requests outside of the configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	/* Clear error */
	WREG32_PCIE(0x18, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
	rdev->gart.ready = true;
	return 0;
}

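/* Disable GART translation and release the kernel mapping and pin on the
 * page table object; the object itself is only freed in the fini path so
 * the GART can be re-enabled on resume. */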
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

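/* Emit a fence on the CP ring: idle the scissor block, flush the 3D
 * destination and Z caches, wait for the pipeline to go idle and clean,
 * invalidate the HDP read cache from the ring (see the HOST_PATH_CNTL
 * erratum above), then write the fence sequence number and fire the
 * software interrupt. */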
void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today callers are ib schedule and buffer move) */
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
	radeon_ring_write(rdev, 0);
	/* Flush 3D cache */
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
				 RADEON_WAIT_2D_IDLECLEAN |
				 RADEON_WAIT_DMA_GUI_IDLE));
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}

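/* Copy between GPU addresses using the DMA engine, splitting the copy
 * into chunks of at most 0x1FFFFF bytes, which appears to be the size
 * limit of the DMA packet used here (register 0x720). */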
int r300_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset,
		  uint64_t dst_offset,
		  unsigned num_pages,
		  struct radeon_fence *fence)
{
	uint32_t size;
	uint32_t cur_size;
	int i, num_loops;
	int r = 0;

	/* radeon pitch is /64 */
	size = num_pages << PAGE_SHIFT;
	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}
	/* Must wait for 2D idle & clean before DMA or hangs might happen */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN);
	for (i = 0; i < num_loops; i++) {
		cur_size = size;
		if (cur_size > 0x1FFFFF) {
			cur_size = 0x1FFFFF;
		}
		size -= cur_size;
		radeon_ring_write(rdev, PACKET0(0x720, 2));
		radeon_ring_write(rdev, src_offset);
		radeon_ring_write(rdev, dst_offset);
		radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
		src_offset += cur_size;
		dst_offset += cur_size;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}

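/* Emit the initial 3D engine state on the CP ring: tile configuration
 * for the detected pipe count, idle/flush sequencing between stages,
 * multisample positions, and rasterizer defaults. */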
void r300_ring_start(struct radeon_device *rdev)
{
	unsigned gb_tile_config;
	int r;

	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}

	r = radeon_ring_lock(rdev, 64);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(rdev, gb_tile_config);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
	radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(rdev,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(rdev,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev);
}

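/* Record chip-specific errata flags; rev A11 R300 silicon gets
 * CHIP_ERRATA_R300_CG set, presumably consumed by the PLL access paths
 * as a clock-gating workaround. */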
void r300_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
	}
}

int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & R300_MC_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

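/* Basic GPU setup: derive the pipe count from the chip family (two quad
 * pipes on r300/r350, one on rv350/rv370/rv380), program the tile
 * configuration, and wait for the GUI and MC to go idle. */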
void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;

	r100_hdp_reset(rdev);
	/* FIXME: does rv380 have one pipe? */
	if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380 */
		rdev->num_gb_pipes = 1;
	}
	rdev->num_z_pipes = 1;
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	default:
	case 1:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(R300_DST_PIPE_CONFIG);
	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}

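/* Soft-reset the GA (geometry) block.  The CP queues are stopped first;
 * if VAP & CP remain busy after the RBBM soft reset, the GA is poked
 * again directly, and the CP is re-initialized afterwards if it was
 * running before the reset. */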
int r300_ga_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	for (i = 0; i < rdev->usec_timeout; i++) {
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
		(void)RREG32(RADEON_RBBM_SOFT_RESET);
		udelay(200);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (tmp & ((1 << 20) | (1 << 26))) {
			DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
			/* GA still busy, soft reset it */
			WREG32(0x429C, 0x200);
			WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
			WREG32(R300_RE_SCISSORS_TL, 0);
			WREG32(R300_RE_SCISSORS_BR, 0);
			WREG32(0x24AC, 0);
		}
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			break;
		}
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset GA! (RBBM_STATUS=0x%08X)\n", tmp);
	return -1;
}

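/* Full GPU reset: reset the HDP, RB2D, GA and CP blocks in turn based on
 * the busy bits in RBBM_STATUS, then verify the chip reports idle. */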
int r300_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

	/* reset order likely matters */
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* reset GA */
	if (status & ((1 << 20) | (1 << 26))) {
		r300_ga_reset(rdev);
	}
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & RADEON_RBBM_ACTIVE) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
	DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
	return 0;
}

/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_vram_info(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;

	tmp = RREG32(RADEON_MEM_CNTL);
	tmp &= R300_MEM_NUM_CHANNELS_MASK;
	switch (tmp) {
	case 0: rdev->mc.vram_width = 64; break;
	case 1: rdev->mc.vram_width = 128; break;
	case 2: rdev->mc.vram_width = 256; break;
	default: rdev->mc.vram_width = 128; break;
	}

	r100_vram_init_sizes(rdev);
}

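/* Request a new PCIE link width.  The lane count is translated to the
 * matching LC_LINK_WIDTH field, the write is skipped if the link already
 * matches, and the function then polls until the reconfiguration
 * completes. */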
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));

	/* wait for lane set to complete */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
}

int rv370_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* FIXME wait for idle */

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
	return 0;
#endif
}

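/* Command stream (CS) checking: userspace command buffers are validated
 * before reaching the CP.  r300_packet0_check() vets type-0 register
 * writes against the safe-register bitmap and patches buffer addresses
 * through relocations, while the shared r100 tracker records enough
 * state (color/Z buffers, textures, vertex setup) to validate draws. */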
static int r300_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp, tile_flags = 0;
	unsigned i;
	int r;
	u32 idx_value;

	ib = p->ib->ptr;
	track = (struct r100_cs_track *)p->track;
	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_ZB_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_TXO_MACRO_TILE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_TXO_MICRO_TILE;

		tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
		tmp |= tile_flags;
		ib[idx] = tmp;
		track->textures[i].robj = reloc->robj;
		break;
	/* Tracked registers */
	case 0x2084:
		/* VAP_VF_CNTL */
		track->vap_vf_cntl = idx_value;
		break;
	case 0x20B4:
		/* VAP_VTX_SIZE */
		track->vtx_size = idx_value & 0x7F;
		break;
	case 0x2134:
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = idx_value & 0x00FFFFFFUL;
		break;
	case 0x43E4:
		/* SC_SCISSOR1 */
		track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			track->maxy -= 1440;
		}
		break;
	case 0x4E00:
		/* RB3D_CCTL */
		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
		break;
	case 0x4E38:
	case 0x4E3C:
	case 0x4E40:
	case 0x4E44:
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_COLOR_TILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_COLOR_MICROTILE_ENABLE;

		tmp = idx_value & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = idx_value & 0x3FFE;
		switch (((idx_value >> 21) & 0xF)) {
		case 9:
		case 11:
		case 12:
			track->cb[i].cpp = 1;
			break;
		case 3:
		case 4:
		case 13:
		case 15:
			track->cb[i].cpp = 2;
			break;
		case 6:
			track->cb[i].cpp = 4;
			break;
		case 10:
			track->cb[i].cpp = 8;
			break;
		case 7:
			track->cb[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d)!\n",
				  ((idx_value >> 21) & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F00:
		/* ZB_CNTL */
		if (idx_value & 2) {
			track->z_enabled = true;
		} else {
			track->z_enabled = false;
		}
		break;
	case 0x4F10:
		/* ZB_FORMAT */
		switch ((idx_value & 0xF)) {
		case 0:
		case 1:
			track->zb.cpp = 2;
			break;
		case 2:
			track->zb.cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid z buffer format (%d)!\n",
				  (idx_value & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F24:
		/* ZB_DEPTHPITCH */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_DEPTHMICROTILE_TILED;

		tmp = idx_value & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		track->zb.pitch = idx_value & 0x3FFC;
		break;
	case 0x4104:
		for (i = 0; i < 16; i++) {
			bool enabled;

			enabled = !!(idx_value & (1 << i));
			track->textures[i].enabled = enabled;
		}
		break;
	case 0x44C0:
	case 0x44C4:
	case 0x44C8:
	case 0x44CC:
	case 0x44D0:
	case 0x44D4:
	case 0x44D8:
	case 0x44DC:
	case 0x44E0:
	case 0x44E4:
	case 0x44E8:
	case 0x44EC:
	case 0x44F0:
	case 0x44F4:
	case 0x44F8:
	case 0x44FC:
		/* TX_FORMAT1_[0-15] */
		i = (reg - 0x44C0) >> 2;
		tmp = (idx_value >> 25) & 0x3;
		track->textures[i].tex_coord_type = tmp;
		switch ((idx_value & 0x1F)) {
		case R300_TX_FORMAT_X8:
		case R300_TX_FORMAT_Y4X4:
		case R300_TX_FORMAT_Z3Y3X2:
			track->textures[i].cpp = 1;
			break;
		case R300_TX_FORMAT_X16:
		case R300_TX_FORMAT_Y8X8:
		case R300_TX_FORMAT_Z5Y6X5:
		case R300_TX_FORMAT_Z6Y5X5:
		case R300_TX_FORMAT_W4Z4Y4X4:
		case R300_TX_FORMAT_W1Z5Y5X5:
		case R300_TX_FORMAT_D3DMFT_CxV8U8:
		case R300_TX_FORMAT_B8G8_B8G8:
		case R300_TX_FORMAT_G8R8_G8B8:
			track->textures[i].cpp = 2;
			break;
		case R300_TX_FORMAT_Y16X16:
		case R300_TX_FORMAT_Z11Y11X10:
		case R300_TX_FORMAT_Z10Y11X11:
		case R300_TX_FORMAT_W8Z8Y8X8:
		case R300_TX_FORMAT_W2Z10Y10X10:
		case 0x17:
		case R300_TX_FORMAT_FL_I32:
		case 0x1e:
			track->textures[i].cpp = 4;
			break;
		case R300_TX_FORMAT_W16Z16Y16X16:
		case R300_TX_FORMAT_FL_R16G16B16A16:
		case R300_TX_FORMAT_FL_I32A32:
			track->textures[i].cpp = 8;
			break;
		case R300_TX_FORMAT_FL_R32G32B32A32:
			track->textures[i].cpp = 16;
			break;
		case R300_TX_FORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case R300_TX_FORMAT_ATI2N:
			if (p->rdev->family < CHIP_R420) {
				DRM_ERROR("Invalid texture format %u\n",
					  (idx_value & 0x1F));
				return -EINVAL;
			}
			/* The same rules apply as for DXT3/5. */
			/* Pass through. */
		case R300_TX_FORMAT_DXT3:
		case R300_TX_FORMAT_DXT5:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		default:
			DRM_ERROR("Invalid texture format %u\n",
				  (idx_value & 0x1F));
			return -EINVAL;
		}
		break;
	case 0x4400:
	case 0x4404:
	case 0x4408:
	case 0x440C:
	case 0x4410:
	case 0x4414:
	case 0x4418:
	case 0x441C:
	case 0x4420:
	case 0x4424:
	case 0x4428:
	case 0x442C:
	case 0x4430:
	case 0x4434:
	case 0x4438:
	case 0x443C:
		/* TX_FILTER0_[0-15] */
		i = (reg - 0x4400) >> 2;
		tmp = idx_value & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_w = false;
		}
		tmp = (idx_value >> 3) & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_h = false;
		}
		break;
	case 0x4500:
	case 0x4504:
	case 0x4508:
	case 0x450C:
	case 0x4510:
	case 0x4514:
	case 0x4518:
	case 0x451C:
	case 0x4520:
	case 0x4524:
	case 0x4528:
	case 0x452C:
	case 0x4530:
	case 0x4534:
	case 0x4538:
	case 0x453C:
		/* TX_FORMAT2_[0-15] */
		i = (reg - 0x4500) >> 2;
		tmp = idx_value & 0x3FFF;
		track->textures[i].pitch = tmp + 1;
		if (p->rdev->family >= CHIP_RV515) {
			tmp = ((idx_value >> 15) & 1) << 11;
			track->textures[i].width_11 = tmp;
			tmp = ((idx_value >> 16) & 1) << 11;
			track->textures[i].height_11 = tmp;

			/* ATI1N */
			if (idx_value & (1 << 14)) {
				/* The same rules apply as for DXT1. */
				track->textures[i].compress_format =
					R100_TRACK_COMP_DXT1;
			}
		} else if (idx_value & (1 << 14)) {
			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
			return -EINVAL;
		}
		break;
	case 0x4480:
	case 0x4484:
	case 0x4488:
	case 0x448C:
	case 0x4490:
	case 0x4494:
	case 0x4498:
	case 0x449C:
	case 0x44A0:
	case 0x44A4:
	case 0x44A8:
	case 0x44AC:
	case 0x44B0:
	case 0x44B4:
	case 0x44B8:
	case 0x44BC:
		/* TX_FORMAT0_[0-15] */
		i = (reg - 0x4480) >> 2;
		tmp = idx_value & 0x7FF;
		track->textures[i].width = tmp + 1;
		tmp = (idx_value >> 11) & 0x7FF;
		track->textures[i].height = tmp + 1;
		tmp = (idx_value >> 26) & 0xF;
		track->textures[i].num_levels = tmp;
		tmp = idx_value & (1 << 31);
		track->textures[i].use_pitch = !!tmp;
		tmp = (idx_value >> 22) & 0xF;
		track->textures[i].txdepth = tmp;
		break;
	case R300_ZB_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case 0x4e0c:
		/* RB3D_COLOR_CHANNEL_MASK */
		track->color_channel_mask = idx_value;
		break;
	case 0x4d1c:
		/* ZB_BW_CNTL */
		track->fastfill = !!(idx_value & (1 << 2));
		break;
	case 0x4e04:
		/* RB3D_BLENDCNTL */
		track->blend_read_enable = !!(idx_value & (1 << 2));
		break;
	case 0x4be8:
		/* valid register only on RV530 */
		if (p->rdev->family == CHIP_RV530)
			break;
		/* fallthrough do not move */
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

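/* Validate a type-3 (command) packet: vertex and index buffer pointers
 * are patched through relocations, and every draw packet updates the
 * tracker and re-runs r100_cs_track_check() so a submission cannot draw
 * beyond the buffers it provided. */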
static int r300_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	unsigned idx;
	int r;

	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		r = r100_packet3_load_vbpntr(p, pkt, idx);
		if (r)
			return r;
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	/* Draw packet */
	case PACKET3_3D_DRAW_IMMD:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the cmd stream */
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_IMMD_2:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the cmd stream */
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

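/* Top-level CS parse entry point for the r300 family: walk the command
 * stream packet by packet, dispatching type-0 packets through the r300
 * safe-register bitmap to r300_packet0_check() and type-3 packets to
 * r300_packet3_check(). */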
int r300_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;
	int r;

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r100_cs_track_clear(p->rdev, track);
	p->track = track;
	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r100_cs_parse_packet0(p, &pkt,
						  p->rdev->config.r300.reg_safe_bm,
						  p->rdev->config.r300.reg_safe_bm_size,
						  &r300_packet0_check);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r300_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}

void r300_set_reg_safe(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}

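/* Program the memory controller apertures: stop all MC clients, program
 * or close off the AGP aperture depending on the bus type, then write
 * the VRAM aperture once the MC reports idle. */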
void r300_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
	}

	/* Stop all mc clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
		       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
		       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32(R_00015C_AGP_BASE_2,
		       upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for mc idle */
	if (r300_mc_wait_for_idle(rdev))
		DRM_INFO("Failed to wait MC idle before programming MC.\n");
	/* Program MC; should be a 32-bit limited address space */
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}

void r300_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
		tmp |= S_00000D_FORCE_VAP(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

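/* Common hardware bring-up shared by init and resume: program the MC,
 * clocks and pipe configuration, enable the appropriate GART, then start
 * the CP ring, writeback, and IB handling. */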
static int r300_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r300_mc_program(rdev);
	/* Resume clock */
	r300_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	r300_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	if (rdev->family == CHIP_R300 ||
	    rdev->family == CHIP_R350 ||
	    rdev->family == CHIP_RV350)
		r100_enable_bm(rdev);

	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}
	/* Enable IRQ */
	r100_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = r100_wb_init(rdev);
	if (r)
		dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
	r = r100_ib_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
		return r;
	}
	return 0;
}

int r300_resume(struct radeon_device *rdev)
{
	/* Make sure the GARTs are disabled */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r300_clock_startup(rdev);
	/* Reset gpu before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_gpu_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r300_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return r300_startup(rdev);
}

int r300_suspend(struct radeon_device *rdev)
{
	r100_cp_disable(rdev);
	r100_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r300_fini(struct radeon_device *rdev)
{
	r100_cp_fini(rdev);
	r100_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

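/* Full device init: fetch and parse the BIOS (r300-family cards are
 * expected to carry a combios), reset and post the card if needed, then
 * bring up clocks, the memory controller, fence and IRQ handling, the
 * memory manager, and the GART before enabling acceleration. */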
int r300_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for R300-family GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_gpu_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (!radeon_boot_test_post_card(rdev))
		return -EINVAL;
	/* Set asic errata */
	r300_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* Get vram information */
	r300_vram_info(rdev);
	/* Initialize memory controller (also test AGP) */
	r = r420_mc_init(rdev);
	if (r)
		return r;
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_init(rdev);
		if (r)
			return r;
	}
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r300_set_reg_safe(rdev);
	rdev->accel_working = true;
	r = r300_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		r100_wb_fini(rdev);
		r100_ib_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCIE)
			rv370_pcie_gart_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}