2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
32 #include <drm/drm_crtc_helper.h>
33 #include "radeon_reg.h"
35 #include "radeon_asic.h"
36 #include "radeon_drm.h"
37 #include "r100_track.h"
40 #include "r300_reg_safe.h"
/* This file gathers functions specific to: r300, r350, rv350, rv370, rv380
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   using MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
 *   However, scheduling such a write on the ring seems harmless; I suspect
 *   the CP read collides with the flush somehow, or maybe the MC, hard to
 *   tell. (Jerome Glisse)
53 * rv370,rv380 PCIE GART
55 static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
57 void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
/* Workaround for a HW bug: do the flush 2 times */
63 for (i = 0; i < 2; i++) {
64 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
65 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
66 (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
67 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
72 #define R300_PTE_WRITEABLE (1 << 2)
73 #define R300_PTE_READABLE (1 << 3)
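/* GART page table entries are 32-bit: bits [31:24] carry system address
 * bits [39:32] and bits [23:0] carry address bits [31:8]; pages are 4 KB
 * aligned, so the low nibble stays free for the R/W flags above, as
 * encoded by rv370_pcie_gart_set_page() below.
 */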
75 int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
77 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
if (i < 0 || i >= rdev->gart.num_gpu_pages) {
82 addr = (lower_32_bits(addr) >> 8) |
83 ((upper_32_bits(addr) & 0xff) << 24) |
84 R300_PTE_WRITEABLE | R300_PTE_READABLE;
/* on x86 we want this to be CPU endian; on powerpc without HW
 * swappers it'll get swapped on the way into VRAM, so there is
 * no need for cpu_to_le32 on VRAM tables */
88 writel(addr, ((void __iomem *)ptr) + (i * 4));
92 int rv370_pcie_gart_init(struct radeon_device *rdev)
96 if (rdev->gart.table.vram.robj) {
97 WARN(1, "RV370 PCIE GART already initialized\n");
100 /* Initialize common gart structure */
101 r = radeon_gart_init(rdev);
104 r = rv370_debugfs_pcie_gart_info_init(rdev);
106 DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
107 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
108 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
109 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
110 return radeon_gart_table_vram_alloc(rdev);
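/* Enable the PCIE GART: pin the page table in VRAM, program the GART
 * aperture start/end from the MC GTT range, point TX_GART_BASE at the
 * table, discard accesses outside the aperture (with the discard read
 * address pointed at the start of VRAM), then switch the unit on and
 * flush the TLB.
 */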
113 int rv370_pcie_gart_enable(struct radeon_device *rdev)
119 if (rdev->gart.table.vram.robj == NULL) {
120 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
123 r = radeon_gart_table_vram_pin(rdev);
126 radeon_gart_restore(rdev);
127 /* discard memory request outside of configured range */
128 tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
129 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
130 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
131 tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
132 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
133 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
134 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
135 table_addr = rdev->gart.table_addr;
136 WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
137 /* FIXME: setup default page */
138 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
139 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
141 WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
142 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
143 tmp |= RADEON_PCIE_TX_GART_EN;
144 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
145 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
146 rv370_pcie_gart_tlb_flush(rdev);
147 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
148 (unsigned)(rdev->mc.gtt_size >> 20),
149 (unsigned long long)table_addr);
150 rdev->gart.ready = true;
154 void rv370_pcie_gart_disable(struct radeon_device *rdev)
159 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
160 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
161 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
162 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
163 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
164 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
165 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
166 if (rdev->gart.table.vram.robj) {
167 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
168 if (likely(r == 0)) {
169 radeon_bo_kunmap(rdev->gart.table.vram.robj);
170 radeon_bo_unpin(rdev->gart.table.vram.robj);
171 radeon_bo_unreserve(rdev->gart.table.vram.robj);
176 void rv370_pcie_gart_fini(struct radeon_device *rdev)
178 radeon_gart_fini(rdev);
179 rv370_pcie_gart_disable(rdev);
180 radeon_gart_table_vram_free(rdev);
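/* Emit a fence on the CP ring: flush the destination and Z caches, wait
 * for the engines to go idle & clean, invalidate the HDP read buffer by
 * writing HOST_PATH_CNTL through the ring (see the errata note at the
 * top of this file), then write the fence sequence to the scratch
 * register and fire the SW interrupt.
 */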
183 void r300_fence_ring_emit(struct radeon_device *rdev,
184 struct radeon_fence *fence)
/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are ib schedule and buffer move) */
188 /* Write SC register so SC & US assert idle */
189 radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
190 radeon_ring_write(rdev, 0);
191 radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
192 radeon_ring_write(rdev, 0);
194 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
195 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
196 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
197 radeon_ring_write(rdev, R300_ZC_FLUSH);
198 /* Wait until IDLE & CLEAN */
199 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
200 radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
201 RADEON_WAIT_2D_IDLECLEAN |
202 RADEON_WAIT_DMA_GUI_IDLE));
203 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
204 radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
205 RADEON_HDP_READ_BUFFER_INVALIDATE);
206 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
207 radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
208 /* Emit fence sequence & fire IRQ */
209 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
210 radeon_ring_write(rdev, fence->seq);
211 radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
212 radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
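/* Load the default 3D engine state through the CP: ISYNC control,
 * GB_TILE_CONFIG matching the detected pipe count, destination/Z cache
 * flushes, automatic pipe configuration, the multisample sample
 * positions, and the GA enhance/polygon/rounding modes.
 */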
215 void r300_ring_start(struct radeon_device *rdev)
217 unsigned gb_tile_config;
220 /* Sub pixel 1/12 so we can have 4K rendering according to doc */
221 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
222 switch(rdev->num_gb_pipes) {
224 gb_tile_config |= R300_PIPE_COUNT_R300;
227 gb_tile_config |= R300_PIPE_COUNT_R420_3P;
230 gb_tile_config |= R300_PIPE_COUNT_R420;
234 gb_tile_config |= R300_PIPE_COUNT_RV350;
238 r = radeon_ring_lock(rdev, 64);
242 radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
243 radeon_ring_write(rdev,
244 RADEON_ISYNC_ANY2D_IDLE3D |
245 RADEON_ISYNC_ANY3D_IDLE2D |
246 RADEON_ISYNC_WAIT_IDLEGUI |
247 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
248 radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
249 radeon_ring_write(rdev, gb_tile_config);
250 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
251 radeon_ring_write(rdev,
252 RADEON_WAIT_2D_IDLECLEAN |
253 RADEON_WAIT_3D_IDLECLEAN);
254 radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
255 radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
256 radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
257 radeon_ring_write(rdev, 0);
258 radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
259 radeon_ring_write(rdev, 0);
260 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
261 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
262 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
263 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
264 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
265 radeon_ring_write(rdev,
266 RADEON_WAIT_2D_IDLECLEAN |
267 RADEON_WAIT_3D_IDLECLEAN);
268 radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
269 radeon_ring_write(rdev, 0);
270 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
271 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
272 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
273 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
274 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
275 radeon_ring_write(rdev,
276 ((6 << R300_MS_X0_SHIFT) |
277 (6 << R300_MS_Y0_SHIFT) |
278 (6 << R300_MS_X1_SHIFT) |
279 (6 << R300_MS_Y1_SHIFT) |
280 (6 << R300_MS_X2_SHIFT) |
281 (6 << R300_MS_Y2_SHIFT) |
282 (6 << R300_MSBD0_Y_SHIFT) |
283 (6 << R300_MSBD0_X_SHIFT)));
284 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
285 radeon_ring_write(rdev,
286 ((6 << R300_MS_X3_SHIFT) |
287 (6 << R300_MS_Y3_SHIFT) |
288 (6 << R300_MS_X4_SHIFT) |
289 (6 << R300_MS_Y4_SHIFT) |
290 (6 << R300_MS_X5_SHIFT) |
291 (6 << R300_MS_Y5_SHIFT) |
292 (6 << R300_MSBD1_SHIFT)));
293 radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
294 radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
295 radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
296 radeon_ring_write(rdev,
297 R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
298 radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
299 radeon_ring_write(rdev,
300 R300_GEOMETRY_ROUND_NEAREST |
301 R300_COLOR_ROUND_NEAREST);
302 radeon_ring_unlock_commit(rdev);
305 void r300_errata(struct radeon_device *rdev)
307 rdev->pll_errata = 0;
309 if (rdev->family == CHIP_R300 &&
310 (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
311 rdev->pll_errata |= CHIP_ERRATA_R300_CG;
315 int r300_mc_wait_for_idle(struct radeon_device *rdev)
320 for (i = 0; i < rdev->usec_timeout; i++) {
322 tmp = RREG32(RADEON_MC_STATUS);
323 if (tmp & R300_MC_IDLE) {
331 void r300_gpu_init(struct radeon_device *rdev)
333 uint32_t gb_tile_config, tmp;
335 if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
336 (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
338 rdev->num_gb_pipes = 2;
340 /* rv350,rv370,rv380,r300 AD, r350 AH */
341 rdev->num_gb_pipes = 1;
343 rdev->num_z_pipes = 1;
344 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
345 switch (rdev->num_gb_pipes) {
347 gb_tile_config |= R300_PIPE_COUNT_R300;
350 gb_tile_config |= R300_PIPE_COUNT_R420_3P;
353 gb_tile_config |= R300_PIPE_COUNT_R420;
357 gb_tile_config |= R300_PIPE_COUNT_RV350;
360 WREG32(R300_GB_TILE_CONFIG, gb_tile_config);
362 if (r100_gui_wait_for_idle(rdev)) {
363 printk(KERN_WARNING "Failed to wait GUI idle while "
364 "programming pipes. Bad things might happen.\n");
367 tmp = RREG32(R300_DST_PIPE_CONFIG);
368 WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
370 WREG32(R300_RB2D_DSTCACHE_MODE,
371 R300_DC_AUTOFLUSH_ENABLE |
372 R300_DC_DC_DISABLE_IGNORE_PE);
374 if (r100_gui_wait_for_idle(rdev)) {
375 printk(KERN_WARNING "Failed to wait GUI idle while "
376 "programming pipes. Bad things might happen.\n");
378 if (r300_mc_wait_for_idle(rdev)) {
379 printk(KERN_WARNING "Failed to wait MC idle while "
380 "programming pipes. Bad things might happen.\n");
382 DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
383 rdev->num_gb_pipes, rdev->num_z_pipes);
386 bool r300_gpu_is_lockup(struct radeon_device *rdev)
391 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
392 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
393 r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
396 /* force CP activities */
397 r = radeon_ring_lock(rdev, 2);
400 radeon_ring_write(rdev, 0x80000000);
401 radeon_ring_write(rdev, 0x80000000);
402 radeon_ring_unlock_commit(rdev);
404 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
405 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
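/* ASIC reset: stop the MC clients, disable the CP and bus mastering,
 * soft-reset the VAP and GA blocks, then soft-reset the CP, restore PCI
 * config space and bus mastering, and only report success if the GA and
 * VAP have gone idle.
 */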
408 int r300_asic_reset(struct radeon_device *rdev)
410 struct r100_mc_save save;
414 status = RREG32(R_000E40_RBBM_STATUS);
415 if (!G_000E40_GUI_ACTIVE(status)) {
418 r100_mc_stop(rdev, &save);
419 status = RREG32(R_000E40_RBBM_STATUS);
420 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
422 WREG32(RADEON_CP_CSQ_CNTL, 0);
423 tmp = RREG32(RADEON_CP_RB_CNTL);
424 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
425 WREG32(RADEON_CP_RB_RPTR_WR, 0);
426 WREG32(RADEON_CP_RB_WPTR, 0);
427 WREG32(RADEON_CP_RB_CNTL, tmp);
429 pci_save_state(rdev->pdev);
430 /* disable bus mastering */
431 r100_bm_disable(rdev);
432 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
433 S_0000F0_SOFT_RESET_GA(1));
434 RREG32(R_0000F0_RBBM_SOFT_RESET);
436 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
438 status = RREG32(R_000E40_RBBM_STATUS);
439 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* Resetting the CP seems to be problematic: sometimes it ends up
 * hard-locking the computer, but it's necessary for a successful
 * reset. More testing & playing is needed on R3XX/R4XX to find a
 * reliable solution (if any).
445 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
446 RREG32(R_0000F0_RBBM_SOFT_RESET);
448 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
450 status = RREG32(R_000E40_RBBM_STATUS);
451 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
452 /* restore PCI & busmastering */
453 pci_restore_state(rdev->pdev);
454 r100_enable_bm(rdev);
455 /* Check if GPU is idle */
456 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
457 dev_err(rdev->dev, "failed to reset GPU\n");
458 rdev->gpu_lockup = true;
dev_info(rdev->dev, "GPU reset succeeded\n");
462 r100_mc_resume(rdev, &save);
467 * r300,r350,rv350,rv380 VRAM info
469 void r300_mc_init(struct radeon_device *rdev)
/* DDR for all cards after R300 & IGP */
475 rdev->mc.vram_is_ddr = true;
476 tmp = RREG32(RADEON_MEM_CNTL);
477 tmp &= R300_MEM_NUM_CHANNELS_MASK;
479 case 0: rdev->mc.vram_width = 64; break;
480 case 1: rdev->mc.vram_width = 128; break;
481 case 2: rdev->mc.vram_width = 256; break;
482 default: rdev->mc.vram_width = 128; break;
484 r100_vram_init_sizes(rdev);
485 base = rdev->mc.aper_base;
486 if (rdev->flags & RADEON_IS_IGP)
487 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
488 radeon_vram_location(rdev, &rdev->mc, base);
489 rdev->mc.gtt_base_align = 0;
490 if (!(rdev->flags & RADEON_IS_AGP))
491 radeon_gtt_location(rdev, &rdev->mc);
492 radeon_update_bandwidth_info(rdev);
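/* Reconfigure the PCIE link width: translate the requested lane count
 * into an LC_LINK_WIDTH mask, return early if the link already runs at
 * that width, otherwise program the new width, trigger LC_RECONFIG_NOW
 * and poll LC_LINK_WIDTH_CNTL until it stops reading back as 0xffffffff.
 */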
495 void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
497 uint32_t link_width_cntl, mask;
499 if (rdev->flags & RADEON_IS_IGP)
502 if (!(rdev->flags & RADEON_IS_PCIE))
505 /* FIXME wait for idle */
509 mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
512 mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
515 mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
518 mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
521 mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
524 mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
528 mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
532 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
534 if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
535 (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
538 link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
539 RADEON_PCIE_LC_RECONFIG_NOW |
540 RADEON_PCIE_LC_RECONFIG_LATER |
541 RADEON_PCIE_LC_SHORT_RECONFIG_EN);
542 link_width_cntl |= mask;
543 WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
544 WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
545 RADEON_PCIE_LC_RECONFIG_NOW));
547 /* wait for lane set to complete */
548 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
549 while (link_width_cntl == 0xffffffff)
550 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
554 int rv370_get_pcie_lanes(struct radeon_device *rdev)
558 if (rdev->flags & RADEON_IS_IGP)
561 if (!(rdev->flags & RADEON_IS_PCIE))
564 /* FIXME wait for idle */
566 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
568 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
569 case RADEON_PCIE_LC_LINK_WIDTH_X0:
571 case RADEON_PCIE_LC_LINK_WIDTH_X1:
573 case RADEON_PCIE_LC_LINK_WIDTH_X2:
575 case RADEON_PCIE_LC_LINK_WIDTH_X4:
577 case RADEON_PCIE_LC_LINK_WIDTH_X8:
579 case RADEON_PCIE_LC_LINK_WIDTH_X16:
585 #if defined(CONFIG_DEBUG_FS)
586 static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
588 struct drm_info_node *node = (struct drm_info_node *) m->private;
589 struct drm_device *dev = node->minor->dev;
590 struct radeon_device *rdev = dev->dev_private;
593 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
594 seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
595 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
596 seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
597 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
598 seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
599 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
600 seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
601 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
602 seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
603 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
604 seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
605 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
606 seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
610 static struct drm_info_list rv370_pcie_gart_info_list[] = {
611 {"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
615 static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
617 #if defined(CONFIG_DEBUG_FS)
618 return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
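/* Validate a single register write from a PACKET0 command: patch buffer
 * offsets with the relocation GPU offset where required, reject hyperz
 * and cmask writes from unauthorized clients, and record color buffer,
 * zbuffer, texture and vertex state in the CS tracker so draws can be
 * checked against buffer sizes later.
 */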
624 static int r300_packet0_check(struct radeon_cs_parser *p,
625 struct radeon_cs_packet *pkt,
626 unsigned idx, unsigned reg)
628 struct radeon_cs_reloc *reloc;
629 struct r100_cs_track *track;
630 volatile uint32_t *ib;
631 uint32_t tmp, tile_flags = 0;
637 track = (struct r100_cs_track *)p->track;
638 idx_value = radeon_get_ib_value(p, idx);
641 case AVIVO_D1MODE_VLINE_START_END:
642 case RADEON_CRTC_GUI_TRIG_VLINE:
643 r = r100_cs_packet_parse_vline(p);
645 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
647 r100_cs_dump_packet(p, pkt);
651 case RADEON_DST_PITCH_OFFSET:
652 case RADEON_SRC_PITCH_OFFSET:
653 r = r100_reloc_pitch_offset(p, pkt, idx, reg);
657 case R300_RB3D_COLOROFFSET0:
658 case R300_RB3D_COLOROFFSET1:
659 case R300_RB3D_COLOROFFSET2:
660 case R300_RB3D_COLOROFFSET3:
661 i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
662 r = r100_cs_packet_next_reloc(p, &reloc);
664 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
666 r100_cs_dump_packet(p, pkt);
669 track->cb[i].robj = reloc->robj;
670 track->cb[i].offset = idx_value;
671 track->cb_dirty = true;
672 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
674 case R300_ZB_DEPTHOFFSET:
675 r = r100_cs_packet_next_reloc(p, &reloc);
677 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
679 r100_cs_dump_packet(p, pkt);
682 track->zb.robj = reloc->robj;
683 track->zb.offset = idx_value;
684 track->zb_dirty = true;
685 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
687 case R300_TX_OFFSET_0:
688 case R300_TX_OFFSET_0+4:
689 case R300_TX_OFFSET_0+8:
690 case R300_TX_OFFSET_0+12:
691 case R300_TX_OFFSET_0+16:
692 case R300_TX_OFFSET_0+20:
693 case R300_TX_OFFSET_0+24:
694 case R300_TX_OFFSET_0+28:
695 case R300_TX_OFFSET_0+32:
696 case R300_TX_OFFSET_0+36:
697 case R300_TX_OFFSET_0+40:
698 case R300_TX_OFFSET_0+44:
699 case R300_TX_OFFSET_0+48:
700 case R300_TX_OFFSET_0+52:
701 case R300_TX_OFFSET_0+56:
702 case R300_TX_OFFSET_0+60:
703 i = (reg - R300_TX_OFFSET_0) >> 2;
704 r = r100_cs_packet_next_reloc(p, &reloc);
706 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
708 r100_cs_dump_packet(p, pkt);
712 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
713 tile_flags |= R300_TXO_MACRO_TILE;
714 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
715 tile_flags |= R300_TXO_MICRO_TILE;
716 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
717 tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
719 tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
722 track->textures[i].robj = reloc->robj;
723 track->tex_dirty = true;
725 /* Tracked registers */
728 track->vap_vf_cntl = idx_value;
732 track->vtx_size = idx_value & 0x7F;
735 /* VAP_VF_MAX_VTX_INDX */
736 track->max_indx = idx_value & 0x00FFFFFFUL;
739 /* VAP_ALT_NUM_VERTICES - only valid on r500 */
740 if (p->rdev->family < CHIP_RV515)
742 track->vap_alt_nverts = idx_value & 0xFFFFFF;
746 track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
747 if (p->rdev->family < CHIP_RV515) {
750 track->cb_dirty = true;
751 track->zb_dirty = true;
755 if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
756 p->rdev->cmask_filp != p->filp) {
757 DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
760 track->num_cb = ((idx_value >> 5) & 0x3) + 1;
761 track->cb_dirty = true;
767 /* RB3D_COLORPITCH0 */
768 /* RB3D_COLORPITCH1 */
769 /* RB3D_COLORPITCH2 */
770 /* RB3D_COLORPITCH3 */
771 r = r100_cs_packet_next_reloc(p, &reloc);
773 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
775 r100_cs_dump_packet(p, pkt);
779 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
780 tile_flags |= R300_COLOR_TILE_ENABLE;
781 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
782 tile_flags |= R300_COLOR_MICROTILE_ENABLE;
783 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
784 tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
786 tmp = idx_value & ~(0x7 << 16);
789 i = (reg - 0x4E38) >> 2;
790 track->cb[i].pitch = idx_value & 0x3FFE;
791 switch (((idx_value >> 21) & 0xF)) {
795 track->cb[i].cpp = 1;
801 track->cb[i].cpp = 2;
804 if (p->rdev->family < CHIP_RV515) {
805 DRM_ERROR("Invalid color buffer format (%d)!\n",
806 ((idx_value >> 21) & 0xF));
811 track->cb[i].cpp = 4;
814 track->cb[i].cpp = 8;
817 track->cb[i].cpp = 16;
820 DRM_ERROR("Invalid color buffer format (%d) !\n",
821 ((idx_value >> 21) & 0xF));
824 track->cb_dirty = true;
829 track->z_enabled = true;
831 track->z_enabled = false;
833 track->zb_dirty = true;
837 switch ((idx_value & 0xF)) {
846 DRM_ERROR("Invalid z buffer format (%d) !\n",
850 track->zb_dirty = true;
854 r = r100_cs_packet_next_reloc(p, &reloc);
856 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
858 r100_cs_dump_packet(p, pkt);
862 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
863 tile_flags |= R300_DEPTHMACROTILE_ENABLE;
864 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
865 tile_flags |= R300_DEPTHMICROTILE_TILED;
866 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
867 tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
869 tmp = idx_value & ~(0x7 << 16);
873 track->zb.pitch = idx_value & 0x3FFC;
874 track->zb_dirty = true;
878 for (i = 0; i < 16; i++) {
881 enabled = !!(idx_value & (1 << i));
882 track->textures[i].enabled = enabled;
884 track->tex_dirty = true;
902 /* TX_FORMAT1_[0-15] */
903 i = (reg - 0x44C0) >> 2;
904 tmp = (idx_value >> 25) & 0x3;
905 track->textures[i].tex_coord_type = tmp;
906 switch ((idx_value & 0x1F)) {
907 case R300_TX_FORMAT_X8:
908 case R300_TX_FORMAT_Y4X4:
909 case R300_TX_FORMAT_Z3Y3X2:
910 track->textures[i].cpp = 1;
911 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
913 case R300_TX_FORMAT_X16:
914 case R300_TX_FORMAT_FL_I16:
915 case R300_TX_FORMAT_Y8X8:
916 case R300_TX_FORMAT_Z5Y6X5:
917 case R300_TX_FORMAT_Z6Y5X5:
918 case R300_TX_FORMAT_W4Z4Y4X4:
919 case R300_TX_FORMAT_W1Z5Y5X5:
920 case R300_TX_FORMAT_D3DMFT_CxV8U8:
921 case R300_TX_FORMAT_B8G8_B8G8:
922 case R300_TX_FORMAT_G8R8_G8B8:
923 track->textures[i].cpp = 2;
924 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
926 case R300_TX_FORMAT_Y16X16:
927 case R300_TX_FORMAT_FL_I16A16:
928 case R300_TX_FORMAT_Z11Y11X10:
929 case R300_TX_FORMAT_Z10Y11X11:
930 case R300_TX_FORMAT_W8Z8Y8X8:
931 case R300_TX_FORMAT_W2Z10Y10X10:
933 case R300_TX_FORMAT_FL_I32:
935 track->textures[i].cpp = 4;
936 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
938 case R300_TX_FORMAT_W16Z16Y16X16:
939 case R300_TX_FORMAT_FL_R16G16B16A16:
940 case R300_TX_FORMAT_FL_I32A32:
941 track->textures[i].cpp = 8;
942 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
944 case R300_TX_FORMAT_FL_R32G32B32A32:
945 track->textures[i].cpp = 16;
946 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
948 case R300_TX_FORMAT_DXT1:
949 track->textures[i].cpp = 1;
950 track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
952 case R300_TX_FORMAT_ATI2N:
953 if (p->rdev->family < CHIP_R420) {
954 DRM_ERROR("Invalid texture format %u\n",
958 /* The same rules apply as for DXT3/5. */
960 case R300_TX_FORMAT_DXT3:
961 case R300_TX_FORMAT_DXT5:
962 track->textures[i].cpp = 1;
963 track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
966 DRM_ERROR("Invalid texture format %u\n",
970 track->tex_dirty = true;
988 /* TX_FILTER0_[0-15] */
989 i = (reg - 0x4400) >> 2;
990 tmp = idx_value & 0x7;
991 if (tmp == 2 || tmp == 4 || tmp == 6) {
992 track->textures[i].roundup_w = false;
994 tmp = (idx_value >> 3) & 0x7;
995 if (tmp == 2 || tmp == 4 || tmp == 6) {
996 track->textures[i].roundup_h = false;
998 track->tex_dirty = true;
1016 /* TX_FORMAT2_[0-15] */
1017 i = (reg - 0x4500) >> 2;
1018 tmp = idx_value & 0x3FFF;
1019 track->textures[i].pitch = tmp + 1;
1020 if (p->rdev->family >= CHIP_RV515) {
1021 tmp = ((idx_value >> 15) & 1) << 11;
1022 track->textures[i].width_11 = tmp;
1023 tmp = ((idx_value >> 16) & 1) << 11;
1024 track->textures[i].height_11 = tmp;
1027 if (idx_value & (1 << 14)) {
1028 /* The same rules apply as for DXT1. */
1029 track->textures[i].compress_format =
1030 R100_TRACK_COMP_DXT1;
1032 } else if (idx_value & (1 << 14)) {
1033 DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
1036 track->tex_dirty = true;
1054 /* TX_FORMAT0_[0-15] */
1055 i = (reg - 0x4480) >> 2;
1056 tmp = idx_value & 0x7FF;
1057 track->textures[i].width = tmp + 1;
1058 tmp = (idx_value >> 11) & 0x7FF;
1059 track->textures[i].height = tmp + 1;
1060 tmp = (idx_value >> 26) & 0xF;
1061 track->textures[i].num_levels = tmp;
1062 tmp = idx_value & (1 << 31);
1063 track->textures[i].use_pitch = !!tmp;
1064 tmp = (idx_value >> 22) & 0xF;
1065 track->textures[i].txdepth = tmp;
1066 track->tex_dirty = true;
1068 case R300_ZB_ZPASS_ADDR:
1069 r = r100_cs_packet_next_reloc(p, &reloc);
1071 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1073 r100_cs_dump_packet(p, pkt);
1076 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1079 /* RB3D_COLOR_CHANNEL_MASK */
1080 track->color_channel_mask = idx_value;
1081 track->cb_dirty = true;
1085 /* r300c emits this register - we need to disable hyperz for it
1086 * without complaining */
1087 if (p->rdev->hyperz_filp != p->filp) {
1088 if (idx_value & 0x1)
1089 ib[idx] = idx_value & ~1;
1094 track->zb_cb_clear = !!(idx_value & (1 << 5));
1095 track->cb_dirty = true;
1096 track->zb_dirty = true;
1097 if (p->rdev->hyperz_filp != p->filp) {
1098 if (idx_value & (R300_HIZ_ENABLE |
1099 R300_RD_COMP_ENABLE |
1100 R300_WR_COMP_ENABLE |
1101 R300_FAST_FILL_ENABLE))
1106 /* RB3D_BLENDCNTL */
1107 track->blend_read_enable = !!(idx_value & (1 << 2));
1108 track->cb_dirty = true;
1110 case R300_RB3D_AARESOLVE_OFFSET:
1111 r = r100_cs_packet_next_reloc(p, &reloc);
1113 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1115 r100_cs_dump_packet(p, pkt);
1118 track->aa.robj = reloc->robj;
1119 track->aa.offset = idx_value;
1120 track->aa_dirty = true;
1121 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1123 case R300_RB3D_AARESOLVE_PITCH:
1124 track->aa.pitch = idx_value & 0x3FFE;
1125 track->aa_dirty = true;
1127 case R300_RB3D_AARESOLVE_CTL:
1128 track->aaresolve = idx_value & 0x1;
1129 track->aa_dirty = true;
1131 case 0x4f30: /* ZB_MASK_OFFSET */
1132 case 0x4f34: /* ZB_ZMASK_PITCH */
1133 case 0x4f44: /* ZB_HIZ_OFFSET */
1134 case 0x4f54: /* ZB_HIZ_PITCH */
1135 if (idx_value && (p->rdev->hyperz_filp != p->filp))
1139 if (idx_value && (p->rdev->hyperz_filp != p->filp))
1141 /* GB_Z_PEQ_CONFIG */
1142 if (p->rdev->family >= CHIP_RV350)
1147 /* valid register only on RV530 */
1148 if (p->rdev->family == CHIP_RV530)
1150 /* fallthrough do not move */
1156 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d (val=%08x)\n",
1157 reg, idx, idx_value);
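/* Validate a PACKET3 command: relocate vertex and index buffer
 * addresses, restrict hyperz/cmask clears to the authorized client, and
 * run r100_cs_track_check on the tracked state before every draw.
 */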
1161 static int r300_packet3_check(struct radeon_cs_parser *p,
1162 struct radeon_cs_packet *pkt)
1164 struct radeon_cs_reloc *reloc;
1165 struct r100_cs_track *track;
1166 volatile uint32_t *ib;
1172 track = (struct r100_cs_track *)p->track;
1173 switch(pkt->opcode) {
1174 case PACKET3_3D_LOAD_VBPNTR:
1175 r = r100_packet3_load_vbpntr(p, pkt, idx);
1179 case PACKET3_INDX_BUFFER:
1180 r = r100_cs_packet_next_reloc(p, &reloc);
1182 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1183 r100_cs_dump_packet(p, pkt);
1186 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
1187 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1193 case PACKET3_3D_DRAW_IMMD:
/* Number of dwords is vtx_size * (num_vertices - 1);
 * PRIM_WALK must be equal to 3, i.e. vertex data embedded
1197 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
1198 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1201 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1202 track->immd_dwords = pkt->count - 1;
1203 r = r100_cs_track_check(p->rdev, track);
1208 case PACKET3_3D_DRAW_IMMD_2:
/* Number of dwords is vtx_size * (num_vertices - 1);
 * PRIM_WALK must be equal to 3, i.e. vertex data embedded
1212 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
1213 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1216 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1217 track->immd_dwords = pkt->count;
1218 r = r100_cs_track_check(p->rdev, track);
1223 case PACKET3_3D_DRAW_VBUF:
1224 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1225 r = r100_cs_track_check(p->rdev, track);
1230 case PACKET3_3D_DRAW_VBUF_2:
1231 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1232 r = r100_cs_track_check(p->rdev, track);
1237 case PACKET3_3D_DRAW_INDX:
1238 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1239 r = r100_cs_track_check(p->rdev, track);
1244 case PACKET3_3D_DRAW_INDX_2:
1245 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1246 r = r100_cs_track_check(p->rdev, track);
1251 case PACKET3_3D_CLEAR_HIZ:
1252 case PACKET3_3D_CLEAR_ZMASK:
1253 if (p->rdev->hyperz_filp != p->filp)
1256 case PACKET3_3D_CLEAR_CMASK:
1257 if (p->rdev->cmask_filp != p->filp)
1263 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
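/* Top-level CS parser for r300: walk the IB chunk packet by packet,
 * checking PACKET0 writes against the r300 register-safe bitmap (with
 * r300_packet0_check for the tracked registers) and PACKET3 commands
 * with r300_packet3_check.
 */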
1269 int r300_cs_parse(struct radeon_cs_parser *p)
1271 struct radeon_cs_packet pkt;
1272 struct r100_cs_track *track;
1275 track = kzalloc(sizeof(*track), GFP_KERNEL);
1278 r100_cs_track_clear(p->rdev, track);
1281 r = r100_cs_packet_parse(p, &pkt, p->idx);
1285 p->idx += pkt.count + 2;
1288 r = r100_cs_parse_packet0(p, &pkt,
1289 p->rdev->config.r300.reg_safe_bm,
1290 p->rdev->config.r300.reg_safe_bm_size,
1291 &r300_packet0_check);
1296 r = r300_packet3_check(p, &pkt);
1299 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
1305 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1309 void r300_set_reg_safe(struct radeon_device *rdev)
1311 rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
1312 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
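/* Program the memory controller: stop the MC clients, program (or
 * disable) the AGP aperture, wait for the MC to go idle, program the FB
 * location, then resume the clients.
 */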
1315 void r300_mc_program(struct radeon_device *rdev)
1317 struct r100_mc_save save;
1320 r = r100_debugfs_mc_info_init(rdev);
1322 dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
1325 /* Stops all mc clients */
1326 r100_mc_stop(rdev, &save);
1327 if (rdev->flags & RADEON_IS_AGP) {
1328 WREG32(R_00014C_MC_AGP_LOCATION,
1329 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
1330 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
1331 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
1332 WREG32(R_00015C_AGP_BASE_2,
1333 upper_32_bits(rdev->mc.agp_base) & 0xff);
1335 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
1336 WREG32(R_000170_AGP_BASE, 0);
1337 WREG32(R_00015C_AGP_BASE_2, 0);
1339 /* Wait for mc idle */
1340 if (r300_mc_wait_for_idle(rdev))
1341 DRM_INFO("Failed to wait MC idle before programming MC.\n");
/* Program the MC; it should be a 32-bit limited address space */
1343 WREG32(R_000148_MC_FB_LOCATION,
1344 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
1345 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
1346 r100_mc_resume(rdev, &save);
1349 void r300_clock_startup(struct radeon_device *rdev)
1353 if (radeon_dynclks != -1 && radeon_dynclks)
1354 radeon_legacy_set_clock_gating(rdev, 1);
/* We need to force on some of the blocks */
1356 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
1357 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
1358 if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
1359 tmp |= S_00000D_FORCE_VAP(1);
1360 WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
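/* Bring the ASIC up: set the common registers, program the MC and
 * clocks, initialize the GPU configuration, enable the GART (PCIE or
 * PCI), and set up writeback, the CP ring and the IB pool.
 */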
1363 static int r300_startup(struct radeon_device *rdev)
1367 /* set common regs */
1368 r100_set_common_regs(rdev);
1370 r300_mc_program(rdev);
1372 r300_clock_startup(rdev);
1373 /* Initialize GPU configuration (# pipes, ...) */
1374 r300_gpu_init(rdev);
1375 /* Initialize GART (initialize after TTM so we can allocate
1376 * memory through TTM but finalize after TTM) */
1377 if (rdev->flags & RADEON_IS_PCIE) {
1378 r = rv370_pcie_gart_enable(rdev);
1383 if (rdev->family == CHIP_R300 ||
1384 rdev->family == CHIP_R350 ||
1385 rdev->family == CHIP_RV350)
1386 r100_enable_bm(rdev);
1388 if (rdev->flags & RADEON_IS_PCI) {
1389 r = r100_pci_gart_enable(rdev);
1394 /* allocate wb buffer */
1395 r = radeon_wb_init(rdev);
1401 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
1402 /* 1M ring buffer */
1403 r = r100_cp_init(rdev, 1024 * 1024);
1405 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
1408 r = r100_ib_init(rdev);
1410 dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
1416 int r300_resume(struct radeon_device *rdev)
/* Make sure the GART is disabled */
1419 if (rdev->flags & RADEON_IS_PCIE)
1420 rv370_pcie_gart_disable(rdev);
1421 if (rdev->flags & RADEON_IS_PCI)
1422 r100_pci_gart_disable(rdev);
1423 /* Resume clock before doing reset */
1424 r300_clock_startup(rdev);
/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
1426 if (radeon_asic_reset(rdev)) {
1427 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1428 RREG32(R_000E40_RBBM_STATUS),
1429 RREG32(R_0007C0_CP_STAT));
1432 radeon_combios_asic_init(rdev->ddev);
1433 /* Resume clock after posting */
1434 r300_clock_startup(rdev);
1435 /* Initialize surface registers */
1436 radeon_surface_init(rdev);
1437 return r300_startup(rdev);
1440 int r300_suspend(struct radeon_device *rdev)
1442 r100_cp_disable(rdev);
1443 radeon_wb_disable(rdev);
1444 r100_irq_disable(rdev);
1445 if (rdev->flags & RADEON_IS_PCIE)
1446 rv370_pcie_gart_disable(rdev);
1447 if (rdev->flags & RADEON_IS_PCI)
1448 r100_pci_gart_disable(rdev);
1452 void r300_fini(struct radeon_device *rdev)
1455 radeon_wb_fini(rdev);
1457 radeon_gem_fini(rdev);
1458 if (rdev->flags & RADEON_IS_PCIE)
1459 rv370_pcie_gart_fini(rdev);
1460 if (rdev->flags & RADEON_IS_PCI)
1461 r100_pci_gart_fini(rdev);
1462 radeon_agp_fini(rdev);
1463 radeon_irq_kms_fini(rdev);
1464 radeon_fence_driver_fini(rdev);
1465 radeon_bo_fini(rdev);
1466 radeon_atombios_fini(rdev);
1471 int r300_init(struct radeon_device *rdev)
1476 r100_vga_render_disable(rdev);
1477 /* Initialize scratch registers */
1478 radeon_scratch_init(rdev);
1479 /* Initialize surface registers */
1480 radeon_surface_init(rdev);
1481 /* TODO: disable VGA need to use VGA request */
/* restore some registers to sane defaults */
1483 r100_restore_sanity(rdev);
1485 if (!radeon_get_bios(rdev)) {
1486 if (ASIC_IS_AVIVO(rdev))
1489 if (rdev->is_atom_bios) {
1490 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
1493 r = radeon_combios_init(rdev);
/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
1498 if (radeon_asic_reset(rdev)) {
1500 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1501 RREG32(R_000E40_RBBM_STATUS),
1502 RREG32(R_0007C0_CP_STAT));
1504 /* check if cards are posted or not */
1505 if (radeon_boot_test_post_card(rdev) == false)
1507 /* Set asic errata */
1509 /* Initialize clocks */
1510 radeon_get_clock_info(rdev->ddev);
1511 /* initialize AGP */
1512 if (rdev->flags & RADEON_IS_AGP) {
1513 r = radeon_agp_init(rdev);
1515 radeon_agp_disable(rdev);
1518 /* initialize memory controller */
1521 r = radeon_fence_driver_init(rdev);
1524 r = radeon_irq_kms_init(rdev);
1527 /* Memory manager */
1528 r = radeon_bo_init(rdev);
1531 if (rdev->flags & RADEON_IS_PCIE) {
1532 r = rv370_pcie_gart_init(rdev);
1536 if (rdev->flags & RADEON_IS_PCI) {
1537 r = r100_pci_gart_init(rdev);
1541 r300_set_reg_safe(rdev);
1542 rdev->accel_working = true;
1543 r = r300_startup(rdev);
/* Something went wrong with the accel init, stop acceleration */
1546 dev_err(rdev->dev, "Disabling GPU acceleration\n");
1548 radeon_wb_fini(rdev);
1550 radeon_irq_kms_fini(rdev);
1551 if (rdev->flags & RADEON_IS_PCIE)
1552 rv370_pcie_gart_fini(rdev);
1553 if (rdev->flags & RADEON_IS_PCI)
1554 r100_pci_gart_fini(rdev);
1555 radeon_agp_fini(rdev);
1556 rdev->accel_working = false;