/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "r100_track.h"
#include "r300d.h"
#include "rv350d.h"
#include "r300_reg_safe.h"
/* This file gathers functions specific to: r300, r350, rv350, rv370, rv380
 *
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   via MMIO to flush the host path read cache; doing so leads to a
 *   HARDLOCKUP. However, scheduling such a write on the ring seems harmless;
 *   I suspect the CP read collides with the flush somehow, or maybe the MC
 *   does, it is hard to tell. (Jerome Glisse)
 */
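/*
 * Illustrative sketch (not part of the original file): the safe pattern is
 * to emit the HOST_PATH_CNTL flush through the CP ring instead of writing
 * the register directly via MMIO, e.g.:
 *
 *	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
 *	radeon_ring_write(rdev, hdp_cntl | RADEON_HDP_READ_BUFFER_INVALIDATE);
 *	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
 *	radeon_ring_write(rdev, hdp_cntl);
 *
 * r300_fence_ring_emit() below does exactly this.
 */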
/*
 * rv370,rv380 PCIE GART
 */
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround for a HW bug: do the flush 2 times */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	}
}
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

	/* valid entries are 0 .. num_gpu_pages - 1 */
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = (lower_32_bits(addr) >> 8) |
	       ((upper_32_bits(addr) & 0xff) << 24) |
	       0xc;
	/* On x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it'll get swapped on the way into VRAM, so there is no
	 * need for cpu_to_le32 on VRAM tables. */
	writel(addr, ((void __iomem *)ptr) + (i * 4));
	return 0;
}
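/*
 * Layout note (an assumption, not stated in the original source): each GART
 * entry is a single 32-bit word.  A 40-bit bus address fits because its low
 * 8 bits are dropped (entries are at least 256-byte aligned) and address
 * bits 32..39 land in PTE bits 24..31, leaving the low nibble (the 0xc
 * above) for entry control flags.
 */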
int rv370_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "RV370 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r)
		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
	rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
	return radeon_gart_table_vram_alloc(rdev);
}
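/*
 * Note: rv370_pcie_gart_init() only allocates the page table in VRAM and
 * wires up the asic GART hooks; the aperture registers are programmed and
 * RADEON_PCIE_TX_GART_EN is set later, in rv370_pcie_gart_enable() below.
 */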
int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* discard memory request outside of configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	/* Clear error */
	WREG32_PCIE(0x18, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
	rdev->gart.ready = true;
	return 0;
}
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}
void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are ib schedule and buffer move) */
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
	radeon_ring_write(rdev, 0);
	/* Flush 3D cache */
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
				 RADEON_WAIT_2D_IDLECLEAN |
				 RADEON_WAIT_DMA_GUI_IDLE));
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
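/*
 * Illustrative caller-side sketch (assumed; the callers live elsewhere):
 * per the comment above, ring space must be reserved before emitting:
 *
 *	if (radeon_ring_lock(rdev, 64) == 0) {
 *		... emit command packets ...
 *		r300_fence_ring_emit(rdev, fence);
 *		radeon_ring_unlock_commit(rdev);
 *	}
 */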
void r300_ring_start(struct radeon_device *rdev)
{
	unsigned gb_tile_config;
	int r;

	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch(rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	r = radeon_ring_lock(rdev, 64);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(rdev, gb_tile_config);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
	radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(rdev,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(rdev,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev);
}
void r300_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
	}
}
int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & R300_MC_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}
void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;

	if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
	    (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380,r300 AD, r350 AH */
		rdev->num_gb_pipes = 1;
	}
	rdev->num_z_pipes = 1;
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	default:
	case 1:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(R300_DST_PIPE_CONFIG);
	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}
bool r300_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 rbbm_status;
	int r;

	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
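/*
 * Note on the scheme above: when the GUI block is idle the lockup tracker
 * is simply re-armed; otherwise two PACKET2 NOPs (0x80000000) are pushed to
 * force CP activity and r100_gpu_cp_is_lockup() decides based on whether
 * the CP read pointer still makes progress.
 */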
int r300_asic_reset(struct radeon_device *rdev)
{
	struct r100_mc_save save;
	u32 status, tmp;

	r100_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	/* save PCI state */
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	r100_bm_disable(rdev);
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* resetting the CP seems to be problematic sometimes it ends up
	 * hard locking the computer, but it's necessary for a successful
	 * reset; more testing & playing is needed on R3XX/R4XX to find a
	 * reliable (if any) solution
	 */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	r100_enable_bm(rdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		rdev->gpu_lockup = true;
		return -1;
	}
	r100_mc_resume(rdev, &save);
	dev_info(rdev->dev, "GPU reset succeeded\n");
	return 0;
}
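/*
 * Note on the sequence above: the MC is stopped and the CP ring pointers
 * are zeroed first, bus mastering is disabled around the soft resets, VAP
 * and GA are reset before the riskier CP reset, and PCI state plus bus
 * mastering are restored before checking whether GA/VAP went idle.
 */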
/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_mc_init(struct radeon_device *rdev)
{
	u64 base;
	uint32_t tmp;

	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RADEON_MEM_CNTL);
	tmp &= R300_MEM_NUM_CHANNELS_MASK;
	switch (tmp) {
	case 0: rdev->mc.vram_width = 64; break;
	case 1: rdev->mc.vram_width = 128; break;
	case 2: rdev->mc.vram_width = 256; break;
	default: rdev->mc.vram_width = 128; break;
	}
	r100_vram_init_sizes(rdev);
	base = rdev->mc.aper_base;
	if (rdev->flags & RADEON_IS_IGP)
		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));

	/* wait for lane set to complete */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
}
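/*
 * Note on the handshake above: the requested width is first latched into
 * LC_LINK_WIDTH_CNTL, then RADEON_PCIE_LC_RECONFIG_NOW is pulsed to start
 * the retrain.  While the link renegotiates the register reads back as
 * 0xffffffff, so polling until a sane value returns signals completion.
 */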
int rv370_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* FIXME wait for idle */

	if (rdev->family < CHIP_R600)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	else
		link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
	return 0;
#endif
}
static int r300_packet0_check(struct radeon_cs_parser *p,
		struct radeon_cs_packet *pkt,
		unsigned idx, unsigned reg)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp, tile_flags = 0;
	unsigned i;
	int r;
	u32 idx_value;

	ib = p->ib->ptr;
	track = (struct r100_cs_track *)p->track;
	idx_value = radeon_get_ib_value(p, idx);

	switch(reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_ZB_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_TXO_MACRO_TILE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_TXO_MICRO_TILE;
		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
			tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

		tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
		tmp |= tile_flags;
		ib[idx] = tmp;
		track->textures[i].robj = reloc->robj;
		break;
	/* Tracked registers */
	case 0x2084:
		/* VAP_VF_CNTL */
		track->vap_vf_cntl = idx_value;
		break;
	case 0x20b4:
		/* vtx_size */
		track->vtx_size = idx_value & 0x7F;
		break;
	case 0x2134:
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = idx_value & 0x00FFFFFFUL;
		break;
	case 0x2088:
		/* VAP_ALT_NUM_VERTICES - only valid on r500 */
		if (p->rdev->family < CHIP_RV515)
			goto fail;
		track->vap_alt_nverts = idx_value & 0xFFFFFF;
		break;
	case 0x43E4:
		/* SC_SCISSOR1 */
		track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			track->maxy -= 1440;
		}
		break;
	case 0x4E00:
		/* RB3D_CCTL */
		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
		break;
	case 0x4E38:
	case 0x4E3C:
	case 0x4E40:
	case 0x4E44:
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_COLOR_TILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_COLOR_MICROTILE_ENABLE;
		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
			tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;

		tmp = idx_value & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;
		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = idx_value & 0x3FFE;
		switch (((idx_value >> 21) & 0xF)) {
		case 9:
		case 11:
		case 12:
			track->cb[i].cpp = 1;
			break;
		case 3:
		case 4:
		case 13:
		case 15:
			track->cb[i].cpp = 2;
			break;
		case 6:
			track->cb[i].cpp = 4;
			break;
		case 10:
			track->cb[i].cpp = 8;
			break;
		case 7:
			track->cb[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> 21) & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F00:
		/* ZB_CNTL */
		if (idx_value & 2) {
			track->z_enabled = true;
		} else {
			track->z_enabled = false;
		}
		break;
	case 0x4F10:
		/* ZB_FORMAT */
		switch ((idx_value & 0xF)) {
		case 0:
		case 1:
			track->zb.cpp = 2;
			break;
		case 2:
			track->zb.cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid z buffer format (%d) !\n",
				  (idx_value & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F24:
		/* ZB_DEPTHPITCH */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_DEPTHMICROTILE_TILED;
		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
			tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;

		tmp = idx_value & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		track->zb.pitch = idx_value & 0x3FFC;
		break;
	case 0x4104:
		for (i = 0; i < 16; i++) {
			bool enabled;

			enabled = !!(idx_value & (1 << i));
			track->textures[i].enabled = enabled;
		}
		break;
	case 0x44C0:
	case 0x44C4:
	case 0x44C8:
	case 0x44CC:
	case 0x44D0:
	case 0x44D4:
	case 0x44D8:
	case 0x44DC:
	case 0x44E0:
	case 0x44E4:
	case 0x44E8:
	case 0x44EC:
	case 0x44F0:
	case 0x44F4:
	case 0x44F8:
	case 0x44FC:
		/* TX_FORMAT1_[0-15] */
		i = (reg - 0x44C0) >> 2;
		tmp = (idx_value >> 25) & 0x3;
		track->textures[i].tex_coord_type = tmp;
		switch ((idx_value & 0x1F)) {
		case R300_TX_FORMAT_X8:
		case R300_TX_FORMAT_Y4X4:
		case R300_TX_FORMAT_Z3Y3X2:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_X16:
		case R300_TX_FORMAT_Y8X8:
		case R300_TX_FORMAT_Z5Y6X5:
		case R300_TX_FORMAT_Z6Y5X5:
		case R300_TX_FORMAT_W4Z4Y4X4:
		case R300_TX_FORMAT_W1Z5Y5X5:
		case R300_TX_FORMAT_D3DMFT_CxV8U8:
		case R300_TX_FORMAT_B8G8_B8G8:
		case R300_TX_FORMAT_G8R8_G8B8:
			track->textures[i].cpp = 2;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_Y16X16:
		case R300_TX_FORMAT_Z11Y11X10:
		case R300_TX_FORMAT_Z10Y11X11:
		case R300_TX_FORMAT_W8Z8Y8X8:
		case R300_TX_FORMAT_W2Z10Y10X10:
		case 0x17:
		case R300_TX_FORMAT_FL_I32:
		case 0x1e:
			track->textures[i].cpp = 4;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_W16Z16Y16X16:
		case R300_TX_FORMAT_FL_R16G16B16A16:
		case R300_TX_FORMAT_FL_I32A32:
			track->textures[i].cpp = 8;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_FL_R32G32B32A32:
			track->textures[i].cpp = 16;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case R300_TX_FORMAT_ATI2N:
			if (p->rdev->family < CHIP_R420) {
				DRM_ERROR("Invalid texture format %u\n",
					  (idx_value & 0x1F));
				return -EINVAL;
			}
			/* The same rules apply as for DXT3/5. */
			/* Pass through. */
		case R300_TX_FORMAT_DXT3:
		case R300_TX_FORMAT_DXT5:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		default:
			DRM_ERROR("Invalid texture format %u\n",
				  (idx_value & 0x1F));
			return -EINVAL;
		}
		break;
	case 0x4400:
	case 0x4404:
	case 0x4408:
	case 0x440C:
	case 0x4410:
	case 0x4414:
	case 0x4418:
	case 0x441C:
	case 0x4420:
	case 0x4424:
	case 0x4428:
	case 0x442C:
	case 0x4430:
	case 0x4434:
	case 0x4438:
	case 0x443C:
		/* TX_FILTER0_[0-15] */
		i = (reg - 0x4400) >> 2;
		tmp = idx_value & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_w = false;
		}
		tmp = (idx_value >> 3) & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_h = false;
		}
		break;
	case 0x4500:
	case 0x4504:
	case 0x4508:
	case 0x450C:
	case 0x4510:
	case 0x4514:
	case 0x4518:
	case 0x451C:
	case 0x4520:
	case 0x4524:
	case 0x4528:
	case 0x452C:
	case 0x4530:
	case 0x4534:
	case 0x4538:
	case 0x453C:
		/* TX_FORMAT2_[0-15] */
		i = (reg - 0x4500) >> 2;
		tmp = idx_value & 0x3FFF;
		track->textures[i].pitch = tmp + 1;
		if (p->rdev->family >= CHIP_RV515) {
			tmp = ((idx_value >> 15) & 1) << 11;
			track->textures[i].width_11 = tmp;
			tmp = ((idx_value >> 16) & 1) << 11;
			track->textures[i].height_11 = tmp;

			/* ATI1N */
			if (idx_value & (1 << 14)) {
				/* The same rules apply as for DXT1. */
				track->textures[i].compress_format =
					R100_TRACK_COMP_DXT1;
			}
		} else if (idx_value & (1 << 14)) {
			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
			return -EINVAL;
		}
		break;
	case 0x4480:
	case 0x4484:
	case 0x4488:
	case 0x448C:
	case 0x4490:
	case 0x4494:
	case 0x4498:
	case 0x449C:
	case 0x44A0:
	case 0x44A4:
	case 0x44A8:
	case 0x44AC:
	case 0x44B0:
	case 0x44B4:
	case 0x44B8:
	case 0x44BC:
		/* TX_FORMAT0_[0-15] */
		i = (reg - 0x4480) >> 2;
		tmp = idx_value & 0x7FF;
		track->textures[i].width = tmp + 1;
		tmp = (idx_value >> 11) & 0x7FF;
		track->textures[i].height = tmp + 1;
		tmp = (idx_value >> 26) & 0xF;
		track->textures[i].num_levels = tmp;
		tmp = idx_value & (1 << 31);
		track->textures[i].use_pitch = !!tmp;
		tmp = (idx_value >> 22) & 0xF;
		track->textures[i].txdepth = tmp;
		break;
	case R300_ZB_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case 0x4e0c:
		/* RB3D_COLOR_CHANNEL_MASK */
		track->color_channel_mask = idx_value;
		break;
	case 0x4d1c:
		/* ZB_BW_CNTL */
		track->zb_cb_clear = !!(idx_value & (1 << 5));
		break;
	case 0x4e04:
		/* RB3D_BLENDCNTL */
		track->blend_read_enable = !!(idx_value & (1 << 2));
		break;
	case 0x4be8:
		/* valid register only on RV530 */
		if (p->rdev->family == CHIP_RV530)
			break;
		/* fallthrough do not move */
	default:
		goto fail;
	}
	return 0;
fail:
	printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
	       reg, idx);
	return -EINVAL;
}
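/*
 * Note on the switch above: the per-unit registers (color offsets at
 * R300_RB3D_COLOROFFSET0, texture state at 0x44C0/0x4400/0x4500/0x4480)
 * are laid out 4 bytes apart, so `i = (reg - <base>) >> 2` recovers the
 * color buffer or texture unit index before updating the tracker state.
 */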
static int r300_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	unsigned idx;
	int r;

	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch(pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		r = r100_packet3_load_vbpntr(p, pkt, idx);
		if (r)
			return r;
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	/* Draw packet */
	case PACKET3_3D_DRAW_IMMD:
		/* Number of dwords is vtx_size * (num_vertices - 1)
		 * PRIM_WALK must be equal to 3: vertex data embedded
		 * in cmd stream */
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_IMMD_2:
		/* Number of dwords is vtx_size * (num_vertices - 1)
		 * PRIM_WALK must be equal to 3: vertex data embedded
		 * in cmd stream */
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
int r300_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;
	int r;

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r100_cs_track_clear(p->rdev, track);
	p->track = track;
	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r100_cs_parse_packet0(p, &pkt,
						  p->rdev->config.r300.reg_safe_bm,
						  p->rdev->config.r300.reg_safe_bm_size,
						  &r300_packet0_check);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r300_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}
void r300_set_reg_safe(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}
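/*
 * Note: r300_reg_safe_bm comes from the generated r300_reg_safe.h and holds
 * one bit per dword-aligned register; r100_cs_parse_packet0() in
 * r300_cs_parse() above uses it to decide which registers user space may
 * emit directly and which must go through r300_packet0_check() for
 * relocation and state tracking.
 */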
void r300_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
	}

	/* Stops all mc clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
		       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
		       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32(R_00015C_AGP_BASE_2,
		       upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for mc idle */
	if (r300_mc_wait_for_idle(rdev))
		DRM_INFO("Failed to wait MC idle before programming MC.\n");
	/* Program MC, should be a 32bits limited address space */
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}
void r300_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
		tmp |= S_00000D_FORCE_VAP(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}
static int r300_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r300_mc_program(rdev);
	/* Resume clock */
	r300_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	r300_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	if (rdev->family == CHIP_R300 ||
	    rdev->family == CHIP_R350 ||
	    rdev->family == CHIP_RV350)
		r100_enable_bm(rdev);

	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}
	/* Enable IRQ */
	r100_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = r100_wb_init(rdev);
	if (r)
		dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
	r = r100_ib_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
		return r;
	}
	return 0;
}
int r300_resume(struct radeon_device *rdev)
{
	/* Make sure the GARTs are not working */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r300_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r300_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return r300_startup(rdev);
}
int r300_suspend(struct radeon_device *rdev)
{
	r100_cp_disable(rdev);
	r100_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}
void r300_fini(struct radeon_device *rdev)
{
	r100_cp_fini(rdev);
	r100_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
int r300_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r300_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	r300_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_init(rdev);
		if (r)
			return r;
	}
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r300_set_reg_safe(rdev);
	rdev->accel_working = true;
	r = r300_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		r100_wb_fini(rdev);
		r100_ib_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCIE)
			rv370_pcie_gart_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}