/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drmP.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#include "radeon_ucode.h"

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV770_smc.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV730_smc.bin");
MODULE_FIRMWARE("radeon/RV740_smc.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/RV710_smc.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
extern int evergreen_rlc_resume(struct radeon_device *rdev);
extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);

/**
 * r600_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r6xx, IGPs, APUs).
 */
u32 r600_get_xclk(struct radeon_device *rdev)
{
	return rdev->clock.spll.reference_freq;
}
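
/*
 * UPLL bring-up sequence used by r600_set_uvd_clocks() below (summary of
 * the register writes, added for readability): park VCLK/DCLK on the
 * bypass clock, put the UPLL itself into bypass, compute and program the
 * feedback/reference/post dividers, let the PLL settle, drop the bypass
 * and finally switch VCLK/DCLK back to the UPLL outputs.  Assumption, as
 * elsewhere in this driver: clock arguments are in 10 kHz units, so the
 * 50000/160000 passed to radeon_uvd_calc_upll_dividers() would bound the
 * VCO at 500 MHz - 1.6 GHz.
 */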

int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
	int r;

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
		 UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
			 ~UPLL_BYPASS_CNTL);

	if (!vclk || !dclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
		return 0;
	}

	if (rdev->clock.spll.reference_freq == 10000)
		ref_div = 34;
	else
		ref_div = 4;

	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
					  ref_div + 1, 0xFFF, 2, 30, ~0,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
		fb_div >>= 1;
	else
		fb_div |= 1;

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* For RS780 we have to choose ref clk */
	if (rdev->family >= CHIP_RS780)
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
			 ~UPLL_REFCLK_SRC_SEL_MASK);

	/* set the required fb, ref and post divider values */
	WREG32_P(CG_UPLL_FUNC_CNTL,
		 UPLL_FB_DIV(fb_div) |
		 UPLL_REF_DIV(ref_div),
		 ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_SW_HILEN(vclk_div >> 1) |
		 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
		 UPLL_SW_HILEN2(dclk_div >> 1) |
		 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
		 UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
		 ~(UPLL_SW_MASK | UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK));

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* deassert BYPASS EN */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}
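
/*
 * The FMT block sits between the CRTC and the encoder and reduces the
 * pipe's color depth to what the sink can accept: for 6 and 8 bpc
 * monitors the code below either truncates the extra bits or enables
 * spatial dithering; 10 bpc sinks need no reduction.  LVDS is skipped
 * because its FMT setup is done by the ATOM BIOS tables.
 */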

void dce3_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		bpc = radeon_get_monitor_bpc(connector);
		dither = radeon_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= FMT_SPATIAL_DITHER_EN;
		else
			tmp |= FMT_TRUNCATE_EN;
		break;
	case 8:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}

/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}
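
/*
 * Dynamic PM state selection.  The power state array is sorted from
 * lowest to highest, so "downclock" walks toward index 0 and "upclock"
 * toward num_power_states - 1.  States flagged SINGLE_DISPLAY_ONLY are
 * skipped when more than one crtc is active, and states flagged
 * MODE_NO_DISPLAY are skipped while any crtc is enabled.
 */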

void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}
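
/*
 * PM profile tables: every profile (default plus low/mid/high for the
 * single-head "SH" and multi-head "MH" cases) stores a power state index
 * and a clock mode index for both the dpms-on and dpms-off states.  The
 * rs780 and r600 variants below differ only in how those indices are
 * picked from the power state array.
 */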

void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}
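
/*
 * r600_pm_misc() applies the non-clock side effects of a power state
 * switch; on r6xx that is just the VDDC voltage, programmed through the
 * ATOM tables.  0xff01 is a sentinel flag rather than a millivolt value,
 * so it is never written to the regulator.
 */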

void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
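
/*
 * HPD setup: each digital connector gets its hot plug detect pin armed,
 * and the interrupt polarity is primed to trigger on the opposite of the
 * current sense state (see r600_hpd_set_polarity() above), so a single
 * edge interrupt catches both connect and disconnect events.
 */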

void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS; it avoids
			 * breaking the aux dp channel on iMacs and helps (but
			 * does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;
		u32 tmp;

		/* r7xx hw bug.  Write to HDP_DEBUG1 followed by an fb read
		 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * The latter seems to cause problems on some AGP cards.
		 * Just use the old method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read the invalidation response */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
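
/*
 * The GART page table lives in VRAM and holds one 64 bit entry per GPU
 * page, hence the table_size = num_gpu_pages * 8 computation below.
 */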

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

static void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
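
/*
 * rs780/rs880 MC registers are not directly mapped: they are reached
 * through an MC_INDEX/MC_DATA register pair, so the accessors below hold
 * the mc_idx_lock spinlock across the index write and the data access.
 */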

uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
	r = RREG32(R_0028FC_MC_DATA);
	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
	return r;
}

void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
		S_0028F8_MC_IND_WR_EN(1));
	WREG32(R_0028FC_MC_DATA, v);
	WREG32(R_0028F8_MC_INDEX, 0x7F);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}

static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address in the GPU
 * address space as it occupies in the CPU (PCI) address space, as some
 * GPUs have issues when the aperture is reprogrammed to a different
 * address space.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then VRAM is placed adjacent to the AGP aperture,
 * as we need them to be contiguous from the GPU's point of view so that
 * we can program the GPU to catch accesses outside of them (weird GPU
 * policy, see ??).
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on AGP platforms.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = mc->mc_mask - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}

static int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;
	uint32_t h_addr, l_addr;
	unsigned long long k8_addr;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);

		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
			/* Use K8 direct mapping for fast fb access. */
			rdev->fastfb_working = false;
			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
#endif
			{
				/* FastFB is only usable with UMA memory, so it is
				 * simply disabled when sideport memory is present.
				 */
				if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
					DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
						(unsigned long long)rdev->mc.aper_base, k8_addr);
					rdev->mc.aper_base = (resource_size_t)k8_addr;
					rdev->fastfb_working = true;
				}
			}
		}
	}

	radeon_update_bandwidth_info(rdev);
	return 0;
}

int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     0, NULL, &rdev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
				(void **)&rdev->vram_scratch.ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	return r;
}

void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}

void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
	u32 tmp = RREG32(R600_BIOS_3_SCRATCH);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(R600_BIOS_3_SCRATCH, tmp);
}

static void r600_print_gpu_status_regs(struct radeon_device *rdev)
{
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
		RREG32(CP_STAT));
	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
		RREG32(DMA_STATUS_REG));
}
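
/*
 * Display hang check: sample the HV counter of every enabled crtc, then
 * re-read it up to ten times, 100us apart.  A live crtc's counter keeps
 * moving; if the counter of any enabled crtc never changes, the display
 * block is considered hung.
 */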

static bool r600_is_display_hung(struct radeon_device *rdev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[2];
	u32 i, j, tmp;

	for (i = 0; i < rdev->num_crtc; i++) {
		if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
			crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(R_008010_GRBM_STATUS);
	if (rdev->family >= CHIP_RV770) {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	} else {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	}

	if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
	    G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
		reset_mask |= RADEON_RESET_CP;

	if (G_008010_GRBM_EE_BUSY(tmp))
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG */
	tmp = RREG32(DMA_STATUS_REG);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* SRBM_STATUS */
	tmp = RREG32(R_000E50_SRBM_STATUS);
	if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
		reset_mask |= RADEON_RESET_RLC;

	if (G_000E50_IH_BUSY(tmp))
		reset_mask |= RADEON_RESET_IH;

	if (G_000E50_SEM_BUSY(tmp))
		reset_mask |= RADEON_RESET_SEM;

	if (G_000E50_GRBM_RQ_PENDING(tmp))
		reset_mask |= RADEON_RESET_GRBM;

	if (G_000E50_VMC_BUSY(tmp))
		reset_mask |= RADEON_RESET_VMC;

	if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
	    G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
	    G_000E50_MCDW_BUSY(tmp))
		reset_mask |= RADEON_RESET_MC;

	if (r600_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}

static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct rv515_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	r600_print_gpu_status_regs(rdev);

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	if (reset_mask & RADEON_RESET_DMA) {
		/* Disable DMA */
		tmp = RREG32(DMA_RB_CNTL);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL, tmp);
	}

	mdelay(50);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		if (rdev->family >= CHIP_RV770)
			grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
		else
			grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
				S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SMX(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
			S_008020_SOFT_RESET_VGT(1);

		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
	}

	if (reset_mask & RADEON_RESET_DMA) {
		if (rdev->family >= CHIP_RV770)
			srbm_soft_reset |= RV770_SOFT_RESET_DMA;
		else
			srbm_soft_reset |= SOFT_RESET_DMA;
	}

	if (reset_mask & RADEON_RESET_RLC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);

	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
	}

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);

	if (grbm_soft_reset) {
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	mdelay(1);

	rv515_mc_resume(rdev, &save);
	udelay(50);

	r600_print_gpu_status_regs(rdev);
}
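
/*
 * PCI config reset is the heavier fallback used when the soft reset
 * above leaves blocks busy: halt CP/RLC/DMA, switch mclk/sclk to bypass,
 * stop bus mastering and memory access, then reset the whole ASIC
 * through PCI config space and wait for it to come back.
 */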

static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp, i;

	dev_info(rdev->dev, "GPU pci config reset\n");

	/* disable dpm? */

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	/* Disable DMA */
	tmp = RREG32(DMA_RB_CNTL);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, tmp);

	mdelay(50);

	/* set mclk/sclk to bypass */
	if (rdev->family >= CHIP_RV770)
		rv770_set_clk_bypass_mode(rdev);
	/* disable BM */
	pci_clear_master(rdev->pdev);
	/* disable mem access */
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}

	/* BIF reset workaround.  Not sure if this is needed on 6xx */
	tmp = RREG32(BUS_CNTL);
	tmp |= VGA_COHE_SPEC_TIMER_DIS;
	WREG32(BUS_CNTL, tmp);

	tmp = RREG32(BIF_SCRATCH0);

	/* reset */
	radeon_pci_config_reset(rdev);
	mdelay(1);

	/* BIF reset workaround.  Not sure if this is needed on 6xx */
	tmp = SOFT_RESET_BIF;
	WREG32(SRBM_SOFT_RESET, tmp);
	mdelay(1);
	WREG32(SRBM_SOFT_RESET, 0);

	/* wait for asic to come out of reset */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}

int r600_asic_reset(struct radeon_device *rdev)
{
	u32 reset_mask;

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	/* try soft reset */
	r600_gpu_soft_reset(rdev, reset_mask);

	reset_mask = r600_gpu_check_soft_reset(rdev);

	/* try pci config reset */
	if (reset_mask && radeon_hard_reset)
		r600_gpu_pci_config_reset(rdev);

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}

/**
 * r600_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
			    RADEON_RESET_CP))) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}
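
/*
 * Backend remapping packs the indices of the usable RBs into a register
 * field of rb_num_width-bit slots (2 bits on r6xx/r7xx, 4 on evergreen+).
 * With N rendering pipes and M usable RBs, each enabled RB is repeated
 * N/M times and the remainder RBs get one extra slot each, spreading the
 * pipes as evenly as possible across the working backends.
 */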
1848 u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1849 u32 tiling_pipe_num,
1851 u32 total_max_rb_num,
1852 u32 disabled_rb_mask)
1854 u32 rendering_pipe_num, rb_num_width, req_rb_num;
1855 u32 pipe_rb_ratio, pipe_rb_remain, tmp;
1856 u32 data = 0, mask = 1 << (max_rb_num - 1);
1859 /* mask out the RBs that don't exist on that asic */
1860 tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
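	/* Worked example (illustrative): with max_rb_num = 4,
	 * (0xff << 4) & 0xff = 0xf0, i.e. RBs 4..7 are marked nonexistent
	 * in addition to any RBs explicitly disabled by the mask. */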
1861 /* make sure at least one RB is available */
1862 if ((tmp & 0xff) != 0xff)
1863 disabled_rb_mask = tmp;
1865 rendering_pipe_num = 1 << tiling_pipe_num;
1866 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
1867 BUG_ON(rendering_pipe_num < req_rb_num);
1869 pipe_rb_ratio = rendering_pipe_num / req_rb_num;
1870 pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
1872 if (rdev->family <= CHIP_RV740) {
1880 for (i = 0; i < max_rb_num; i++) {
1881 if (!(mask & disabled_rb_mask)) {
1882 for (j = 0; j < pipe_rb_ratio; j++) {
1883 data <<= rb_num_width;
1884 data |= max_rb_num - i - 1;
1886 if (pipe_rb_remain) {
1887 data <<= rb_num_width;
1888 data |= max_rb_num - i - 1;
1898 int r600_count_pipe_bits(uint32_t val)
1900 return hweight32(val);
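	/* hweight32() counts set bits, e.g. hweight32(0x0000000b) == 3;
	 * callers use it to turn enable/disable masks into counts. */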
1903 static void r600_gpu_init(struct radeon_device *rdev)
1907 u32 cc_rb_backend_disable;
1908 u32 cc_gc_shader_pipe_config;
1912 u32 sq_gpr_resource_mgmt_1 = 0;
1913 u32 sq_gpr_resource_mgmt_2 = 0;
1914 u32 sq_thread_resource_mgmt = 0;
1915 u32 sq_stack_resource_mgmt_1 = 0;
1916 u32 sq_stack_resource_mgmt_2 = 0;
1917 u32 disabled_rb_mask;
1919 rdev->config.r600.tiling_group_size = 256;
1920 switch (rdev->family) {
1922 rdev->config.r600.max_pipes = 4;
1923 rdev->config.r600.max_tile_pipes = 8;
1924 rdev->config.r600.max_simds = 4;
1925 rdev->config.r600.max_backends = 4;
1926 rdev->config.r600.max_gprs = 256;
1927 rdev->config.r600.max_threads = 192;
1928 rdev->config.r600.max_stack_entries = 256;
1929 rdev->config.r600.max_hw_contexts = 8;
1930 rdev->config.r600.max_gs_threads = 16;
1931 rdev->config.r600.sx_max_export_size = 128;
1932 rdev->config.r600.sx_max_export_pos_size = 16;
1933 rdev->config.r600.sx_max_export_smx_size = 128;
1934 rdev->config.r600.sq_num_cf_insts = 2;
1938 rdev->config.r600.max_pipes = 2;
1939 rdev->config.r600.max_tile_pipes = 2;
1940 rdev->config.r600.max_simds = 3;
1941 rdev->config.r600.max_backends = 1;
1942 rdev->config.r600.max_gprs = 128;
1943 rdev->config.r600.max_threads = 192;
1944 rdev->config.r600.max_stack_entries = 128;
1945 rdev->config.r600.max_hw_contexts = 8;
1946 rdev->config.r600.max_gs_threads = 4;
1947 rdev->config.r600.sx_max_export_size = 128;
1948 rdev->config.r600.sx_max_export_pos_size = 16;
1949 rdev->config.r600.sx_max_export_smx_size = 128;
1950 rdev->config.r600.sq_num_cf_insts = 2;
1956 rdev->config.r600.max_pipes = 1;
1957 rdev->config.r600.max_tile_pipes = 1;
1958 rdev->config.r600.max_simds = 2;
1959 rdev->config.r600.max_backends = 1;
1960 rdev->config.r600.max_gprs = 128;
1961 rdev->config.r600.max_threads = 192;
1962 rdev->config.r600.max_stack_entries = 128;
1963 rdev->config.r600.max_hw_contexts = 4;
1964 rdev->config.r600.max_gs_threads = 4;
1965 rdev->config.r600.sx_max_export_size = 128;
1966 rdev->config.r600.sx_max_export_pos_size = 16;
1967 rdev->config.r600.sx_max_export_smx_size = 128;
1968 rdev->config.r600.sq_num_cf_insts = 1;
1971 rdev->config.r600.max_pipes = 4;
1972 rdev->config.r600.max_tile_pipes = 4;
1973 rdev->config.r600.max_simds = 4;
1974 rdev->config.r600.max_backends = 4;
1975 rdev->config.r600.max_gprs = 192;
1976 rdev->config.r600.max_threads = 192;
1977 rdev->config.r600.max_stack_entries = 256;
1978 rdev->config.r600.max_hw_contexts = 8;
1979 rdev->config.r600.max_gs_threads = 16;
1980 rdev->config.r600.sx_max_export_size = 128;
1981 rdev->config.r600.sx_max_export_pos_size = 16;
1982 rdev->config.r600.sx_max_export_smx_size = 128;
1983 rdev->config.r600.sq_num_cf_insts = 2;
1989 /* Initialize HDP */
1990 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1991 WREG32((0x2c14 + j), 0x00000000);
1992 WREG32((0x2c18 + j), 0x00000000);
1993 WREG32((0x2c1c + j), 0x00000000);
1994 WREG32((0x2c20 + j), 0x00000000);
1995 WREG32((0x2c24 + j), 0x00000000);
1998 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
2002 ramcfg = RREG32(RAMCFG);
2003 switch (rdev->config.r600.max_tile_pipes) {
2005 tiling_config |= PIPE_TILING(0);
2008 tiling_config |= PIPE_TILING(1);
2011 tiling_config |= PIPE_TILING(2);
2014 tiling_config |= PIPE_TILING(3);
2019 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
2020 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
2021 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
2022 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
2024 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
2026 tiling_config |= ROW_TILING(3);
2027 tiling_config |= SAMPLE_SPLIT(3);
2029 tiling_config |= ROW_TILING(tmp);
2030 tiling_config |= SAMPLE_SPLIT(tmp);
2032 tiling_config |= BANK_SWAPS(1);
2034 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
2035 tmp = R6XX_MAX_BACKENDS -
2036 r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
2037 if (tmp < rdev->config.r600.max_backends) {
2038 rdev->config.r600.max_backends = tmp;
2041 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
2042 tmp = R6XX_MAX_PIPES -
2043 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
2044 if (tmp < rdev->config.r600.max_pipes) {
2045 rdev->config.r600.max_pipes = tmp;
2047 tmp = R6XX_MAX_SIMDS -
2048 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
2049 if (tmp < rdev->config.r600.max_simds) {
2050 rdev->config.r600.max_simds = tmp;
2052 tmp = rdev->config.r600.max_simds -
2053 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
2054 rdev->config.r600.active_simds = tmp;
2056 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
2057 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
2058 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
2059 R6XX_MAX_BACKENDS, disabled_rb_mask);
2060 tiling_config |= tmp << 16;
2061 rdev->config.r600.backend_map = tmp;
2063 rdev->config.r600.tile_config = tiling_config;
2064 WREG32(GB_TILING_CONFIG, tiling_config);
2065 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
2066 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
2067 WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
2069 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
2070 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
2071 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
2073 /* Setup some CP states */
2074 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
2075 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
2077 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
2078 SYNC_WALKER | SYNC_ALIGNER));
2079 /* Setup various GPU states */
2080 if (rdev->family == CHIP_RV670)
2081 WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
2083 tmp = RREG32(SX_DEBUG_1);
2084 tmp |= SMX_EVENT_RELEASE;
2085 if ((rdev->family > CHIP_R600))
2086 tmp |= ENABLE_NEW_SMX_ADDRESS;
2087 WREG32(SX_DEBUG_1, tmp);
2089 if (((rdev->family) == CHIP_R600) ||
2090 ((rdev->family) == CHIP_RV630) ||
2091 ((rdev->family) == CHIP_RV610) ||
2092 ((rdev->family) == CHIP_RV620) ||
2093 ((rdev->family) == CHIP_RS780) ||
2094 ((rdev->family) == CHIP_RS880)) {
2095 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
2097 WREG32(DB_DEBUG, 0);
2099 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
2100 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
2102 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2103 WREG32(VGT_NUM_INSTANCES, 0);
2105 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
2106 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
2108 tmp = RREG32(SQ_MS_FIFO_SIZES);
2109 if (((rdev->family) == CHIP_RV610) ||
2110 ((rdev->family) == CHIP_RV620) ||
2111 ((rdev->family) == CHIP_RS780) ||
2112 ((rdev->family) == CHIP_RS880)) {
2113 tmp = (CACHE_FIFO_SIZE(0xa) |
2114 FETCH_FIFO_HIWATER(0xa) |
2115 DONE_FIFO_HIWATER(0xe0) |
2116 ALU_UPDATE_FIFO_HIWATER(0x8));
2117 } else if (((rdev->family) == CHIP_R600) ||
2118 ((rdev->family) == CHIP_RV630)) {
2119 tmp &= ~DONE_FIFO_HIWATER(0xff);
2120 tmp |= DONE_FIFO_HIWATER(0x4);
2122 WREG32(SQ_MS_FIFO_SIZES, tmp);
2124 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
2125 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
2127 sq_config = RREG32(SQ_CONFIG);
2128 sq_config &= ~(PS_PRIO(3) |
2132 sq_config |= (DX9_CONSTS |
2139 if ((rdev->family) == CHIP_R600) {
2140 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
2142 NUM_CLAUSE_TEMP_GPRS(4));
2143 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
2145 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
2146 NUM_VS_THREADS(48) |
2149 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
2150 NUM_VS_STACK_ENTRIES(128));
2151 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
2152 NUM_ES_STACK_ENTRIES(0));
2153 } else if (((rdev->family) == CHIP_RV610) ||
2154 ((rdev->family) == CHIP_RV620) ||
2155 ((rdev->family) == CHIP_RS780) ||
2156 ((rdev->family) == CHIP_RS880)) {
2157 /* no vertex cache */
2158 sq_config &= ~VC_ENABLE;
2160 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2162 NUM_CLAUSE_TEMP_GPRS(2));
2163 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2165 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2166 NUM_VS_THREADS(78) |
2168 NUM_ES_THREADS(31));
2169 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2170 NUM_VS_STACK_ENTRIES(40));
2171 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2172 NUM_ES_STACK_ENTRIES(16));
2173 } else if (((rdev->family) == CHIP_RV630) ||
2174 ((rdev->family) == CHIP_RV635)) {
2175 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2177 NUM_CLAUSE_TEMP_GPRS(2));
2178 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
2180 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2181 NUM_VS_THREADS(78) |
2183 NUM_ES_THREADS(31));
2184 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2185 NUM_VS_STACK_ENTRIES(40));
2186 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2187 NUM_ES_STACK_ENTRIES(16));
2188 } else if ((rdev->family) == CHIP_RV670) {
2189 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2191 NUM_CLAUSE_TEMP_GPRS(2));
2192 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2194 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2195 NUM_VS_THREADS(78) |
2197 NUM_ES_THREADS(31));
2198 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
2199 NUM_VS_STACK_ENTRIES(64));
2200 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
2201 NUM_ES_STACK_ENTRIES(64));
2204 WREG32(SQ_CONFIG, sq_config);
2205 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
2206 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
2207 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2208 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2209 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2211 if (((rdev->family) == CHIP_RV610) ||
2212 ((rdev->family) == CHIP_RV620) ||
2213 ((rdev->family) == CHIP_RS780) ||
2214 ((rdev->family) == CHIP_RS880)) {
2215 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
2217 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
2220 /* More default values. 2D/3D driver should adjust as needed */
2221 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
2222 S1_X(0x4) | S1_Y(0xc)));
2223 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
2224 S1_X(0x2) | S1_Y(0x2) |
2225 S2_X(0xa) | S2_Y(0x6) |
2226 S3_X(0x6) | S3_Y(0xa)));
2227 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
2228 S1_X(0x4) | S1_Y(0xc) |
2229 S2_X(0x1) | S2_Y(0x6) |
2230 S3_X(0xa) | S3_Y(0xe)));
2231 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
2232 S5_X(0x0) | S5_Y(0x0) |
2233 S6_X(0xb) | S6_Y(0x4) |
2234 S7_X(0x7) | S7_Y(0x8)));
2236 WREG32(VGT_STRMOUT_EN, 0);
2237 tmp = rdev->config.r600.max_pipes * 16;
2238 switch (rdev->family) {
2254 WREG32(VGT_ES_PER_GS, 128);
2255 WREG32(VGT_GS_PER_ES, tmp);
2256 WREG32(VGT_GS_PER_VS, 2);
2257 WREG32(VGT_GS_VERTEX_REUSE, 16);
2259 /* more default values. 2D/3D driver should adjust as needed */
2260 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2261 WREG32(VGT_STRMOUT_EN, 0);
2263 WREG32(PA_SC_MODE_CNTL, 0);
2264 WREG32(PA_SC_AA_CONFIG, 0);
2265 WREG32(PA_SC_LINE_STIPPLE, 0);
2266 WREG32(SPI_INPUT_Z, 0);
2267 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
2268 WREG32(CB_COLOR7_FRAG, 0);
2270 /* Clear render buffer base addresses */
2271 WREG32(CB_COLOR0_BASE, 0);
2272 WREG32(CB_COLOR1_BASE, 0);
2273 WREG32(CB_COLOR2_BASE, 0);
2274 WREG32(CB_COLOR3_BASE, 0);
2275 WREG32(CB_COLOR4_BASE, 0);
2276 WREG32(CB_COLOR5_BASE, 0);
2277 WREG32(CB_COLOR6_BASE, 0);
2278 WREG32(CB_COLOR7_BASE, 0);
2279 WREG32(CB_COLOR7_FRAG, 0);
2281 switch (rdev->family) {
2286 tmp = TC_L2_SIZE(8);
2290 tmp = TC_L2_SIZE(4);
2293 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
2296 tmp = TC_L2_SIZE(0);
2299 WREG32(TC_CNTL, tmp);
2301 tmp = RREG32(HDP_HOST_PATH_CNTL);
2302 WREG32(HDP_HOST_PATH_CNTL, tmp);
2304 tmp = RREG32(ARB_POP);
2305 tmp |= ENABLE_TC128;
2306 WREG32(ARB_POP, tmp);
2308 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2309 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
2311 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
2312 WREG32(VC_ENHANCE, 0);
2317 * Indirect registers accessor
2319 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
2321 unsigned long flags;
2324 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2325 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2326 (void)RREG32(PCIE_PORT_INDEX);
2327 r = RREG32(PCIE_PORT_DATA);
2328 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2332 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2334 unsigned long flags;
2336 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2337 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2338 (void)RREG32(PCIE_PORT_INDEX);
2339 WREG32(PCIE_PORT_DATA, (v));
2340 (void)RREG32(PCIE_PORT_DATA);
2341 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2347 void r600_cp_stop(struct radeon_device *rdev)
2349 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
2350 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2351 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
2352 WREG32(SCRATCH_UMSK, 0);
2353 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
2356 int r600_init_microcode(struct radeon_device *rdev)
2358 const char *chip_name;
2359 const char *rlc_chip_name;
2360 const char *smc_chip_name = "RV770";
2361 size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
2367 switch (rdev->family) {
2370 rlc_chip_name = "R600";
2373 chip_name = "RV610";
2374 rlc_chip_name = "R600";
2377 chip_name = "RV630";
2378 rlc_chip_name = "R600";
2381 chip_name = "RV620";
2382 rlc_chip_name = "R600";
2385 chip_name = "RV635";
2386 rlc_chip_name = "R600";
2389 chip_name = "RV670";
2390 rlc_chip_name = "R600";
2394 chip_name = "RS780";
2395 rlc_chip_name = "R600";
2398 chip_name = "RV770";
2399 rlc_chip_name = "R700";
2400 smc_chip_name = "RV770";
2401 smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
2404 chip_name = "RV730";
2405 rlc_chip_name = "R700";
2406 smc_chip_name = "RV730";
2407 smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
2410 chip_name = "RV710";
2411 rlc_chip_name = "R700";
2412 smc_chip_name = "RV710";
2413 smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
2416 chip_name = "RV730";
2417 rlc_chip_name = "R700";
2418 smc_chip_name = "RV740";
2419 smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
2422 chip_name = "CEDAR";
2423 rlc_chip_name = "CEDAR";
2424 smc_chip_name = "CEDAR";
2425 smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
2428 chip_name = "REDWOOD";
2429 rlc_chip_name = "REDWOOD";
2430 smc_chip_name = "REDWOOD";
2431 smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
2434 chip_name = "JUNIPER";
2435 rlc_chip_name = "JUNIPER";
2436 smc_chip_name = "JUNIPER";
2437 smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
2441 chip_name = "CYPRESS";
2442 rlc_chip_name = "CYPRESS";
2443 smc_chip_name = "CYPRESS";
2444 smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
2448 rlc_chip_name = "SUMO";
2452 rlc_chip_name = "SUMO";
2455 chip_name = "SUMO2";
2456 rlc_chip_name = "SUMO";
2461 if (rdev->family >= CHIP_CEDAR) {
2462 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2463 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
2464 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
2465 } else if (rdev->family >= CHIP_RV770) {
2466 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2467 me_req_size = R700_PM4_UCODE_SIZE * 4;
2468 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2470 pfp_req_size = R600_PFP_UCODE_SIZE * 4;
2471 me_req_size = R600_PM4_UCODE_SIZE * 12;
2472 rlc_req_size = R600_RLC_UCODE_SIZE * 4;
2475 DRM_INFO("Loading %s Microcode\n", chip_name);
2477 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2478 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
2481 if (rdev->pfp_fw->size != pfp_req_size) {
2483 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2484 rdev->pfp_fw->size, fw_name);
2489 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2490 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
2493 if (rdev->me_fw->size != me_req_size) {
2495 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2496 rdev->me_fw->size, fw_name);
2500 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2501 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
2504 if (rdev->rlc_fw->size != rlc_req_size) {
2506 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2507 rdev->rlc_fw->size, fw_name);
2511 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
2512 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
2513 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2516 "smc: error loading firmware \"%s\"\n",
2518 release_firmware(rdev->smc_fw);
2519 rdev->smc_fw = NULL;
2521 } else if (rdev->smc_fw->size != smc_req_size) {
2523 "smc: Bogus length %zu in firmware \"%s\"\n",
2524 rdev->smc_fw->size, fw_name);
2533 "r600_cp: Failed to load firmware \"%s\"\n",
2535 release_firmware(rdev->pfp_fw);
2536 rdev->pfp_fw = NULL;
2537 release_firmware(rdev->me_fw);
2539 release_firmware(rdev->rlc_fw);
2540 rdev->rlc_fw = NULL;
2541 release_firmware(rdev->smc_fw);
2542 rdev->smc_fw = NULL;
2547 u32 r600_gfx_get_rptr(struct radeon_device *rdev,
2548 struct radeon_ring *ring)
2552 if (rdev->wb.enabled)
2553 rptr = rdev->wb.wb[ring->rptr_offs/4];
2555 rptr = RREG32(R600_CP_RB_RPTR);
2560 u32 r600_gfx_get_wptr(struct radeon_device *rdev,
2561 struct radeon_ring *ring)
2565 wptr = RREG32(R600_CP_RB_WPTR);
2570 void r600_gfx_set_wptr(struct radeon_device *rdev,
2571 struct radeon_ring *ring)
2573 WREG32(R600_CP_RB_WPTR, ring->wptr);
2574 (void)RREG32(R600_CP_RB_WPTR);
2577 static int r600_cp_load_microcode(struct radeon_device *rdev)
2579 const __be32 *fw_data;
2582 if (!rdev->me_fw || !rdev->pfp_fw)
2591 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2594 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2595 RREG32(GRBM_SOFT_RESET);
2597 WREG32(GRBM_SOFT_RESET, 0);
2599 WREG32(CP_ME_RAM_WADDR, 0);
2601 fw_data = (const __be32 *)rdev->me_fw->data;
2602 WREG32(CP_ME_RAM_WADDR, 0);
2603 for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
2604 WREG32(CP_ME_RAM_DATA,
2605 be32_to_cpup(fw_data++));
2607 fw_data = (const __be32 *)rdev->pfp_fw->data;
2608 WREG32(CP_PFP_UCODE_ADDR, 0);
2609 for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
2610 WREG32(CP_PFP_UCODE_DATA,
2611 be32_to_cpup(fw_data++));
2613 WREG32(CP_PFP_UCODE_ADDR, 0);
2614 WREG32(CP_ME_RAM_WADDR, 0);
2615 WREG32(CP_ME_RAM_RADDR, 0);
2619 int r600_cp_start(struct radeon_device *rdev)
2621 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2625 r = radeon_ring_lock(rdev, ring, 7);
2627 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2630 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2631 radeon_ring_write(ring, 0x1);
2632 if (rdev->family >= CHIP_RV770) {
2633 radeon_ring_write(ring, 0x0);
2634 radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
2636 radeon_ring_write(ring, 0x3);
2637 radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
2639 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2640 radeon_ring_write(ring, 0);
2641 radeon_ring_write(ring, 0);
2642 radeon_ring_unlock_commit(rdev, ring, false);
2645 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2649 int r600_cp_resume(struct radeon_device *rdev)
2651 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2657 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2658 RREG32(GRBM_SOFT_RESET);
2660 WREG32(GRBM_SOFT_RESET, 0);
2662 /* Set ring buffer size */
2663 rb_bufsz = order_base_2(ring->ring_size / 8);
2664 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
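	/* Illustrative numbers, assuming 4 KiB GPU pages:
	 * order_base_2(4096 / 8) = 9 lands in the RB_BLKSZ field (bits 8+),
	 * while rb_bufsz fills the low RB_BUFSZ bits. */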
2666 tmp |= BUF_SWAP_32BIT;
2668 WREG32(CP_RB_CNTL, tmp);
2669 WREG32(CP_SEM_WAIT_TIMER, 0x0);
2671 /* Set the write pointer delay */
2672 WREG32(CP_RB_WPTR_DELAY, 0);
2674 /* Initialize the ring buffer's read and write pointers */
2675 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2676 WREG32(CP_RB_RPTR_WR, 0);
2678 WREG32(CP_RB_WPTR, ring->wptr);
2680 /* set the wb address whether it's enabled or not */
2681 WREG32(CP_RB_RPTR_ADDR,
2682 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2683 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2684 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2686 if (rdev->wb.enabled)
2687 WREG32(SCRATCH_UMSK, 0xff);
2689 tmp |= RB_NO_UPDATE;
2690 WREG32(SCRATCH_UMSK, 0);
2694 WREG32(CP_RB_CNTL, tmp);
2696 WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
2697 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2699 r600_cp_start(rdev);
2701 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
2703 ring->ready = false;
2707 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
2708 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
2713 void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
2718 /* Align ring size */
2719 rb_bufsz = order_base_2(ring_size / 8);
2720 ring_size = (1 << (rb_bufsz + 1)) * 4;
2721 ring->ring_size = ring_size;
2722 ring->align_mask = 16 - 1;
2724 if (radeon_ring_supports_scratch_reg(rdev, ring)) {
2725 r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
2727 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
2728 ring->rptr_save_reg = 0;
2733 void r600_cp_fini(struct radeon_device *rdev)
2735 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2737 radeon_ring_fini(rdev, ring);
2738 radeon_scratch_free(rdev, ring->rptr_save_reg);
2742 * GPU scratch register helper functions.
2744 void r600_scratch_init(struct radeon_device *rdev)
2748 rdev->scratch.num_reg = 7;
2749 rdev->scratch.reg_base = SCRATCH_REG0;
2750 for (i = 0; i < rdev->scratch.num_reg; i++) {
2751 rdev->scratch.free[i] = true;
2752 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2756 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2763 r = radeon_scratch_get(rdev, &scratch);
2765 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2768 WREG32(scratch, 0xCAFEDEAD);
2769 r = radeon_ring_lock(rdev, ring, 3);
2771 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
2772 radeon_scratch_free(rdev, scratch);
2775 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2776 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2777 radeon_ring_write(ring, 0xDEADBEEF);
2778 radeon_ring_unlock_commit(rdev, ring, false);
2779 for (i = 0; i < rdev->usec_timeout; i++) {
2780 tmp = RREG32(scratch);
2781 if (tmp == 0xDEADBEEF)
2785 if (i < rdev->usec_timeout) {
2786 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2788 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2789 ring->idx, scratch, tmp);
2792 radeon_scratch_free(rdev, scratch);
2797 * CP fences/semaphores
2800 void r600_fence_ring_emit(struct radeon_device *rdev,
2801 struct radeon_fence *fence)
2803 struct radeon_ring *ring = &rdev->ring[fence->ring];
2804 u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
2805 PACKET3_SH_ACTION_ENA;
2807 if (rdev->family >= CHIP_RV770)
2808 cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
2810 if (rdev->wb.use_event) {
2811 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2812 /* flush read cache over gart */
2813 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2814 radeon_ring_write(ring, cp_coher_cntl);
2815 radeon_ring_write(ring, 0xFFFFFFFF);
2816 radeon_ring_write(ring, 0);
2817 radeon_ring_write(ring, 10); /* poll interval */
2818 /* EVENT_WRITE_EOP - flush caches, send int */
2819 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2820 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2821 radeon_ring_write(ring, lower_32_bits(addr));
2822 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2823 radeon_ring_write(ring, fence->seq);
2824 radeon_ring_write(ring, 0);
2826 /* flush read cache over gart */
2827 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2828 radeon_ring_write(ring, cp_coher_cntl);
2829 radeon_ring_write(ring, 0xFFFFFFFF);
2830 radeon_ring_write(ring, 0);
2831 radeon_ring_write(ring, 10); /* poll interval */
2832 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2833 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2834 /* wait for 3D idle clean */
2835 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2836 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2837 radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2838 /* Emit fence sequence & fire IRQ */
2839 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2840 radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2841 radeon_ring_write(ring, fence->seq);
2842 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2843 radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2844 radeon_ring_write(ring, RB_INT_STAT);
2849 * r600_semaphore_ring_emit - emit a semaphore on the CP ring
2851 * @rdev: radeon_device pointer
2852 * @ring: radeon ring buffer object
2853 * @semaphore: radeon semaphore object
2854 * @emit_wait: Is this a semaphore wait?
2856 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
2857 * from running ahead of semaphore waits.
2859 bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2860 struct radeon_ring *ring,
2861 struct radeon_semaphore *semaphore,
2864 uint64_t addr = semaphore->gpu_addr;
2865 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2867 if (rdev->family < CHIP_CAYMAN)
2868 sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
2870 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2871 radeon_ring_write(ring, lower_32_bits(addr));
2872 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2874 /* PFP_SYNC_ME packet only exists on 7xx+ */
2875 if (emit_wait && (rdev->family >= CHIP_RV770)) {
2876 /* Prevent the PFP from running ahead of the semaphore wait */
2877 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2878 radeon_ring_write(ring, 0x0);
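		/* Illustrative ordering note: the producer ring emits this
		 * packet pair with emit_wait = false (SIGNAL) and the consumer
		 * ring with emit_wait = true (WAIT) on the same semaphore
		 * address; the PFP_SYNC_ME above keeps the waiting ring's PFP
		 * from prefetching past a not-yet-signaled wait. */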
2885 * r600_copy_cpdma - copy pages using the CP DMA engine
2887 * @rdev: radeon_device pointer
2888 * @src_offset: src GPU address
2889 * @dst_offset: dst GPU address
2890 * @num_gpu_pages: number of GPU pages to xfer
2891 * @resv: reservation object to sync to
2893 * Copy GPU pages using the CP DMA engine (r6xx+).
2894 * Used by the radeon ttm implementation to move pages if
2895 * registered as the asic copy callback.
2897 struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
2898 uint64_t src_offset, uint64_t dst_offset,
2899 unsigned num_gpu_pages,
2900 struct reservation_object *resv)
2902 struct radeon_semaphore *sem = NULL;
2903 struct radeon_fence *fence;
2904 int ring_index = rdev->asic->copy.blit_ring_index;
2905 struct radeon_ring *ring = &rdev->ring[ring_index];
2906 u32 size_in_bytes, cur_size_in_bytes, tmp;
2910 r = radeon_semaphore_create(rdev, &sem);
2912 DRM_ERROR("radeon: moving bo (%d).\n", r);
2916 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
2917 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
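	/* Worked example: copying 8 MiB gives size_in_bytes = 0x800000 and
	 * num_loops = DIV_ROUND_UP(0x800000, 0x1fffff) = 5 CP_DMA packets. */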
2918 r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
2920 DRM_ERROR("radeon: moving bo (%d).\n", r);
2921 radeon_semaphore_free(rdev, &sem, NULL);
2925 radeon_semaphore_sync_resv(sem, resv, false);
2926 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
2928 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2929 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2930 radeon_ring_write(ring, WAIT_3D_IDLE_bit);
2931 for (i = 0; i < num_loops; i++) {
2932 cur_size_in_bytes = size_in_bytes;
2933 if (cur_size_in_bytes > 0x1fffff)
2934 cur_size_in_bytes = 0x1fffff;
2935 size_in_bytes -= cur_size_in_bytes;
2936 tmp = upper_32_bits(src_offset) & 0xff;
2937 if (size_in_bytes == 0)
2938 tmp |= PACKET3_CP_DMA_CP_SYNC;
2939 radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
2940 radeon_ring_write(ring, lower_32_bits(src_offset));
2941 radeon_ring_write(ring, tmp);
2942 radeon_ring_write(ring, lower_32_bits(dst_offset));
2943 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
2944 radeon_ring_write(ring, cur_size_in_bytes);
2945 src_offset += cur_size_in_bytes;
2946 dst_offset += cur_size_in_bytes;
2948 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2949 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2950 radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
2952 r = radeon_fence_emit(rdev, &fence, ring->idx);
2954 radeon_ring_unlock_undo(rdev, ring);
2955 radeon_semaphore_free(rdev, &sem, NULL);
2959 radeon_ring_unlock_commit(rdev, ring, false);
2960 radeon_semaphore_free(rdev, &sem, fence);
2965 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2966 uint32_t tiling_flags, uint32_t pitch,
2967 uint32_t offset, uint32_t obj_size)
2969 /* FIXME: implement */
2973 void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2975 /* FIXME: implement */
2978 static int r600_startup(struct radeon_device *rdev)
2980 struct radeon_ring *ring;
2983 /* enable pcie gen2 link */
2984 r600_pcie_gen2_enable(rdev);
2986 /* scratch needs to be initialized before MC */
2987 r = r600_vram_scratch_init(rdev);
2991 r600_mc_program(rdev);
2993 if (rdev->flags & RADEON_IS_AGP) {
2994 r600_agp_enable(rdev);
2996 r = r600_pcie_gart_enable(rdev);
3000 r600_gpu_init(rdev);
3002 /* allocate wb buffer */
3003 r = radeon_wb_init(rdev);
3007 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3009 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3013 if (rdev->has_uvd) {
3014 r = uvd_v1_0_resume(rdev);
3016 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
3018 dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
3022 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
3026 if (!rdev->irq.installed) {
3027 r = radeon_irq_kms_init(rdev);
3032 r = r600_irq_init(rdev);
3034 DRM_ERROR("radeon: IH init failed (%d).\n", r);
3035 radeon_irq_kms_fini(rdev);
3040 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3041 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
3046 r = r600_cp_load_microcode(rdev);
3049 r = r600_cp_resume(rdev);
3053 if (rdev->has_uvd) {
3054 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
3055 if (ring->ring_size) {
3056 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
3059 r = uvd_v1_0_init(rdev);
3061 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
3065 r = radeon_ib_pool_init(rdev);
3067 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
3071 r = r600_audio_init(rdev);
3073 DRM_ERROR("radeon: audio init failed\n");
3080 void r600_vga_set_state(struct radeon_device *rdev, bool state)
3084 temp = RREG32(CONFIG_CNTL);
3085 if (!state) {
3091 WREG32(CONFIG_CNTL, temp);
3094 int r600_resume(struct radeon_device *rdev)
3098 /* Do not reset the GPU before posting; on r600 hw, unlike on r500 hw,
3099 * posting will perform the tasks necessary to bring the GPU back into good
3103 atom_asic_init(rdev->mode_info.atom_context);
3105 if (rdev->pm.pm_method == PM_METHOD_DPM)
3106 radeon_pm_resume(rdev);
3108 rdev->accel_working = true;
3109 r = r600_startup(rdev);
3111 DRM_ERROR("r600 startup failed on resume\n");
3112 rdev->accel_working = false;
3119 int r600_suspend(struct radeon_device *rdev)
3121 radeon_pm_suspend(rdev);
3122 r600_audio_fini(rdev);
3124 if (rdev->has_uvd) {
3125 uvd_v1_0_fini(rdev);
3126 radeon_uvd_suspend(rdev);
3128 r600_irq_suspend(rdev);
3129 radeon_wb_disable(rdev);
3130 r600_pcie_gart_disable(rdev);
3135 /* The plan is to move initialization into this function and use
3136 * helper functions so that radeon_device_init does little more
3137 * than call asic-specific functions. This
3138 * should also allow us to remove a bunch of the callback functions
3141 int r600_init(struct radeon_device *rdev)
3145 if (r600_debugfs_mc_info_init(rdev)) {
3146 DRM_ERROR("Failed to register debugfs file for mc !\n");
3149 if (!radeon_get_bios(rdev)) {
3150 if (ASIC_IS_AVIVO(rdev))
3153 /* Must be an ATOMBIOS */
3154 if (!rdev->is_atom_bios) {
3155 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
3158 r = radeon_atombios_init(rdev);
3161 /* Post card if necessary */
3162 if (!radeon_card_posted(rdev)) {
3164 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3167 DRM_INFO("GPU not posted. posting now...\n");
3168 atom_asic_init(rdev->mode_info.atom_context);
3170 /* Initialize scratch registers */
3171 r600_scratch_init(rdev);
3172 /* Initialize surface registers */
3173 radeon_surface_init(rdev);
3174 /* Initialize clocks */
3175 radeon_get_clock_info(rdev->ddev);
3177 r = radeon_fence_driver_init(rdev);
3180 if (rdev->flags & RADEON_IS_AGP) {
3181 r = radeon_agp_init(rdev);
3183 radeon_agp_disable(rdev);
3185 r = r600_mc_init(rdev);
3188 /* Memory manager */
3189 r = radeon_bo_init(rdev);
3193 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
3194 r = r600_init_microcode(rdev);
3196 DRM_ERROR("Failed to load firmware!\n");
3201 /* Initialize power management */
3202 radeon_pm_init(rdev);
3204 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3205 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
3207 if (rdev->has_uvd) {
3208 r = radeon_uvd_init(rdev);
3210 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
3211 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
3215 rdev->ih.ring_obj = NULL;
3216 r600_ih_ring_init(rdev, 64 * 1024);
3218 r = r600_pcie_gart_init(rdev);
3222 rdev->accel_working = true;
3223 r = r600_startup(rdev);
3225 dev_err(rdev->dev, "disabling GPU acceleration\n");
3227 r600_irq_fini(rdev);
3228 radeon_wb_fini(rdev);
3229 radeon_ib_pool_fini(rdev);
3230 radeon_irq_kms_fini(rdev);
3231 r600_pcie_gart_fini(rdev);
3232 rdev->accel_working = false;
3238 void r600_fini(struct radeon_device *rdev)
3240 radeon_pm_fini(rdev);
3241 r600_audio_fini(rdev);
3243 r600_irq_fini(rdev);
3244 if (rdev->has_uvd) {
3245 uvd_v1_0_fini(rdev);
3246 radeon_uvd_fini(rdev);
3248 radeon_wb_fini(rdev);
3249 radeon_ib_pool_fini(rdev);
3250 radeon_irq_kms_fini(rdev);
3251 r600_pcie_gart_fini(rdev);
3252 r600_vram_scratch_fini(rdev);
3253 radeon_agp_fini(rdev);
3254 radeon_gem_fini(rdev);
3255 radeon_fence_driver_fini(rdev);
3256 radeon_bo_fini(rdev);
3257 radeon_atombios_fini(rdev);
3266 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3268 struct radeon_ring *ring = &rdev->ring[ib->ring];
3271 if (ring->rptr_save_reg) {
3272 next_rptr = ring->wptr + 3 + 4;
3273 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3274 radeon_ring_write(ring, ((ring->rptr_save_reg -
3275 PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
3276 radeon_ring_write(ring, next_rptr);
3277 } else if (rdev->wb.enabled) {
3278 next_rptr = ring->wptr + 5 + 4;
3279 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
3280 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3281 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
3282 radeon_ring_write(ring, next_rptr);
3283 radeon_ring_write(ring, 0);
3286 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3287 radeon_ring_write(ring,
3291 (ib->gpu_addr & 0xFFFFFFFC));
3292 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
3293 radeon_ring_write(ring, ib->length_dw);
3296 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3298 struct radeon_ib ib;
3304 r = radeon_scratch_get(rdev, &scratch);
3306 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3309 WREG32(scratch, 0xCAFEDEAD);
3310 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3312 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3315 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
3316 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3317 ib.ptr[2] = 0xDEADBEEF;
3319 r = radeon_ib_schedule(rdev, &ib, NULL, false);
3321 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3324 r = radeon_fence_wait(ib.fence, false);
3326 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3329 for (i = 0; i < rdev->usec_timeout; i++) {
3330 tmp = RREG32(scratch);
3331 if (tmp == 0xDEADBEEF)
3335 if (i < rdev->usec_timeout) {
3336 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3338 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3343 radeon_ib_free(rdev, &ib);
3345 radeon_scratch_free(rdev, scratch);
3352 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much
3353 * the same as the CP ring buffer, but in reverse: rather than the CPU
3354 * writing to the ring and the GPU consuming, the GPU writes to the ring
3355 * and the host consumes. As the host irq handler processes interrupts, it
3356 * increments the rptr. When the rptr catches up with the wptr, all the
3357 * current interrupts have been processed.
3360 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
3364 /* Align ring size */
3365 rb_bufsz = order_base_2(ring_size / 4);
3366 ring_size = (1 << rb_bufsz) * 4;
3367 rdev->ih.ring_size = ring_size;
3368 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
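	/* e.g. the default 64 KiB request: order_base_2(65536 / 4) = 14,
	 * ring_size = (1 << 14) * 4 = 64 KiB and ptr_mask = 0xffff. */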
3372 int r600_ih_ring_alloc(struct radeon_device *rdev)
3376 /* Allocate ring buffer */
3377 if (rdev->ih.ring_obj == NULL) {
3378 r = radeon_bo_create(rdev, rdev->ih.ring_size,
3380 RADEON_GEM_DOMAIN_GTT, 0,
3381 NULL, &rdev->ih.ring_obj);
3383 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
3386 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3387 if (unlikely(r != 0))
3389 r = radeon_bo_pin(rdev->ih.ring_obj,
3390 RADEON_GEM_DOMAIN_GTT,
3391 &rdev->ih.gpu_addr);
3393 radeon_bo_unreserve(rdev->ih.ring_obj);
3394 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
3397 r = radeon_bo_kmap(rdev->ih.ring_obj,
3398 (void **)&rdev->ih.ring);
3399 radeon_bo_unreserve(rdev->ih.ring_obj);
3401 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
3408 void r600_ih_ring_fini(struct radeon_device *rdev)
3411 if (rdev->ih.ring_obj) {
3412 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3413 if (likely(r == 0)) {
3414 radeon_bo_kunmap(rdev->ih.ring_obj);
3415 radeon_bo_unpin(rdev->ih.ring_obj);
3416 radeon_bo_unreserve(rdev->ih.ring_obj);
3418 radeon_bo_unref(&rdev->ih.ring_obj);
3419 rdev->ih.ring = NULL;
3420 rdev->ih.ring_obj = NULL;
3424 void r600_rlc_stop(struct radeon_device *rdev)
3427 if ((rdev->family >= CHIP_RV770) &&
3428 (rdev->family <= CHIP_RV740)) {
3429 /* r7xx asics need to soft reset RLC before halting */
3430 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
3431 RREG32(SRBM_SOFT_RESET);
3433 WREG32(SRBM_SOFT_RESET, 0);
3434 RREG32(SRBM_SOFT_RESET);
3437 WREG32(RLC_CNTL, 0);
3440 static void r600_rlc_start(struct radeon_device *rdev)
3442 WREG32(RLC_CNTL, RLC_ENABLE);
3445 static int r600_rlc_resume(struct radeon_device *rdev)
3448 const __be32 *fw_data;
3453 r600_rlc_stop(rdev);
3455 WREG32(RLC_HB_CNTL, 0);
3457 WREG32(RLC_HB_BASE, 0);
3458 WREG32(RLC_HB_RPTR, 0);
3459 WREG32(RLC_HB_WPTR, 0);
3460 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
3461 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
3462 WREG32(RLC_MC_CNTL, 0);
3463 WREG32(RLC_UCODE_CNTL, 0);
3465 fw_data = (const __be32 *)rdev->rlc_fw->data;
3466 if (rdev->family >= CHIP_RV770) {
3467 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
3468 WREG32(RLC_UCODE_ADDR, i);
3469 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3472 for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
3473 WREG32(RLC_UCODE_ADDR, i);
3474 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3477 WREG32(RLC_UCODE_ADDR, 0);
3479 r600_rlc_start(rdev);
3484 static void r600_enable_interrupts(struct radeon_device *rdev)
3486 u32 ih_cntl = RREG32(IH_CNTL);
3487 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3489 ih_cntl |= ENABLE_INTR;
3490 ih_rb_cntl |= IH_RB_ENABLE;
3491 WREG32(IH_CNTL, ih_cntl);
3492 WREG32(IH_RB_CNTL, ih_rb_cntl);
3493 rdev->ih.enabled = true;
3496 void r600_disable_interrupts(struct radeon_device *rdev)
3498 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3499 u32 ih_cntl = RREG32(IH_CNTL);
3501 ih_rb_cntl &= ~IH_RB_ENABLE;
3502 ih_cntl &= ~ENABLE_INTR;
3503 WREG32(IH_RB_CNTL, ih_rb_cntl);
3504 WREG32(IH_CNTL, ih_cntl);
3505 /* set rptr, wptr to 0 */
3506 WREG32(IH_RB_RPTR, 0);
3507 WREG32(IH_RB_WPTR, 0);
3508 rdev->ih.enabled = false;
3512 static void r600_disable_interrupt_state(struct radeon_device *rdev)
3516 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
3517 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3518 WREG32(DMA_CNTL, tmp);
3519 WREG32(GRBM_INT_CNTL, 0);
3520 WREG32(DxMODE_INT_MASK, 0);
3521 WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
3522 WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
3523 if (ASIC_IS_DCE3(rdev)) {
3524 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
3525 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
3526 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3527 WREG32(DC_HPD1_INT_CONTROL, tmp);
3528 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3529 WREG32(DC_HPD2_INT_CONTROL, tmp);
3530 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3531 WREG32(DC_HPD3_INT_CONTROL, tmp);
3532 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3533 WREG32(DC_HPD4_INT_CONTROL, tmp);
3534 if (ASIC_IS_DCE32(rdev)) {
3535 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3536 WREG32(DC_HPD5_INT_CONTROL, tmp);
3537 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3538 WREG32(DC_HPD6_INT_CONTROL, tmp);
3539 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3540 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3541 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3542 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3544 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3545 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3546 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3547 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3550 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
3551 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
3552 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3553 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3554 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3555 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3556 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3557 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3558 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3559 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3560 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3561 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3565 int r600_irq_init(struct radeon_device *rdev)
3569 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
3572 ret = r600_ih_ring_alloc(rdev);
3577 r600_disable_interrupts(rdev);
3580 if (rdev->family >= CHIP_CEDAR)
3581 ret = evergreen_rlc_resume(rdev);
3583 ret = r600_rlc_resume(rdev);
3585 r600_ih_ring_fini(rdev);
3589 /* setup interrupt control */
3590 /* set dummy read address to ring address */
3591 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
3592 interrupt_cntl = RREG32(INTERRUPT_CNTL);
3593 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
3594 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
3596 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
3597 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
3598 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
3599 WREG32(INTERRUPT_CNTL, interrupt_cntl);
3601 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
3602 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
3604 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
3605 IH_WPTR_OVERFLOW_CLEAR |
3608 if (rdev->wb.enabled)
3609 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
3611 /* set the writeback address whether it's enabled or not */
3612 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
3613 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
3615 WREG32(IH_RB_CNTL, ih_rb_cntl);
3617 /* set rptr, wptr to 0 */
3618 WREG32(IH_RB_RPTR, 0);
3619 WREG32(IH_RB_WPTR, 0);
3621 /* Default settings for IH_CNTL (disabled at first) */
3622 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
3623 /* RPTR_REARM only works if MSIs are enabled */
3624 if (rdev->msi_enabled)
3625 ih_cntl |= RPTR_REARM;
3626 WREG32(IH_CNTL, ih_cntl);
3628 /* force the active interrupt state to all disabled */
3629 if (rdev->family >= CHIP_CEDAR)
3630 evergreen_disable_interrupt_state(rdev);
3632 r600_disable_interrupt_state(rdev);
3634 /* at this point everything should be set up correctly to enable master */
3635 pci_set_master(rdev->pdev);
3638 r600_enable_interrupts(rdev);
3643 void r600_irq_suspend(struct radeon_device *rdev)
3645 r600_irq_disable(rdev);
3646 r600_rlc_stop(rdev);
3649 void r600_irq_fini(struct radeon_device *rdev)
3651 r600_irq_suspend(rdev);
3652 r600_ih_ring_fini(rdev);
3655 int r600_irq_set(struct radeon_device *rdev)
3657 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3659 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3660 u32 grbm_int_cntl = 0;
3663 u32 thermal_int = 0;
3665 if (!rdev->irq.installed) {
3666 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
3669 /* don't enable anything if the ih is disabled */
3670 if (!rdev->ih.enabled) {
3671 r600_disable_interrupts(rdev);
3672 /* force the active interrupt state to all disabled */
3673 r600_disable_interrupt_state(rdev);
3677 if (ASIC_IS_DCE3(rdev)) {
3678 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3679 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3680 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3681 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3682 if (ASIC_IS_DCE32(rdev)) {
3683 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3684 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3685 hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3686 hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3688 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3689 hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3692 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3693 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3694 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3695 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3696 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3699 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3701 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
3702 thermal_int = RREG32(CG_THERMAL_INT) &
3703 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3704 } else if (rdev->family >= CHIP_RV770) {
3705 thermal_int = RREG32(RV770_CG_THERMAL_INT) &
3706 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3708 if (rdev->irq.dpm_thermal) {
3709 DRM_DEBUG("dpm thermal\n");
3710 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
3713 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3714 DRM_DEBUG("r600_irq_set: sw int\n");
3715 cp_int_cntl |= RB_INT_ENABLE;
3716 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3719 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
3720 DRM_DEBUG("r600_irq_set: sw int dma\n");
3721 dma_cntl |= TRAP_ENABLE;
3724 if (rdev->irq.crtc_vblank_int[0] ||
3725 atomic_read(&rdev->irq.pflip[0])) {
3726 DRM_DEBUG("r600_irq_set: vblank 0\n");
3727 mode_int |= D1MODE_VBLANK_INT_MASK;
3729 if (rdev->irq.crtc_vblank_int[1] ||
3730 atomic_read(&rdev->irq.pflip[1])) {
3731 DRM_DEBUG("r600_irq_set: vblank 1\n");
3732 mode_int |= D2MODE_VBLANK_INT_MASK;
3734 if (rdev->irq.hpd[0]) {
3735 DRM_DEBUG("r600_irq_set: hpd 1\n");
3736 hpd1 |= DC_HPDx_INT_EN;
3738 if (rdev->irq.hpd[1]) {
3739 DRM_DEBUG("r600_irq_set: hpd 2\n");
3740 hpd2 |= DC_HPDx_INT_EN;
3742 if (rdev->irq.hpd[2]) {
3743 DRM_DEBUG("r600_irq_set: hpd 3\n");
3744 hpd3 |= DC_HPDx_INT_EN;
3746 if (rdev->irq.hpd[3]) {
3747 DRM_DEBUG("r600_irq_set: hpd 4\n");
3748 hpd4 |= DC_HPDx_INT_EN;
3750 if (rdev->irq.hpd[4]) {
3751 DRM_DEBUG("r600_irq_set: hpd 5\n");
3752 hpd5 |= DC_HPDx_INT_EN;
3754 if (rdev->irq.hpd[5]) {
3755 DRM_DEBUG("r600_irq_set: hpd 6\n");
3756 hpd6 |= DC_HPDx_INT_EN;
3758 if (rdev->irq.afmt[0]) {
3759 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3760 hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3762 if (rdev->irq.afmt[1]) {
3763 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3764 hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3767 WREG32(CP_INT_CNTL, cp_int_cntl);
3768 WREG32(DMA_CNTL, dma_cntl);
3769 WREG32(DxMODE_INT_MASK, mode_int);
3770 WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
3771 WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
3772 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3773 if (ASIC_IS_DCE3(rdev)) {
3774 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3775 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3776 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3777 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3778 if (ASIC_IS_DCE32(rdev)) {
3779 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3780 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3781 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
3782 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
3784 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3785 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3788 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3789 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3790 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3791 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3792 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3794 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
3795 WREG32(CG_THERMAL_INT, thermal_int);
3796 } else if (rdev->family >= CHIP_RV770) {
3797 WREG32(RV770_CG_THERMAL_INT, thermal_int);
3803 static void r600_irq_ack(struct radeon_device *rdev)
3807 if (ASIC_IS_DCE3(rdev)) {
3808 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3809 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3810 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3811 if (ASIC_IS_DCE32(rdev)) {
3812 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
3813 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
3815 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3816 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
3819 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3820 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3821 rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
3822 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3823 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
3825 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3826 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
3828 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3829 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3830 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3831 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3832 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
3833 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3834 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
3835 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3836 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
3837 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3838 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
3839 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3840 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3841 if (ASIC_IS_DCE3(rdev)) {
3842 tmp = RREG32(DC_HPD1_INT_CONTROL);
3843 tmp |= DC_HPDx_INT_ACK;
3844 WREG32(DC_HPD1_INT_CONTROL, tmp);
3846 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3847 tmp |= DC_HPDx_INT_ACK;
3848 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3851 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3852 if (ASIC_IS_DCE3(rdev)) {
3853 tmp = RREG32(DC_HPD2_INT_CONTROL);
3854 tmp |= DC_HPDx_INT_ACK;
3855 WREG32(DC_HPD2_INT_CONTROL, tmp);
3857 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3858 tmp |= DC_HPDx_INT_ACK;
3859 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3862 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3863 if (ASIC_IS_DCE3(rdev)) {
3864 tmp = RREG32(DC_HPD3_INT_CONTROL);
3865 tmp |= DC_HPDx_INT_ACK;
3866 WREG32(DC_HPD3_INT_CONTROL, tmp);
3868 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3869 tmp |= DC_HPDx_INT_ACK;
3870 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3873 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3874 tmp = RREG32(DC_HPD4_INT_CONTROL);
3875 tmp |= DC_HPDx_INT_ACK;
3876 WREG32(DC_HPD4_INT_CONTROL, tmp);
3878 if (ASIC_IS_DCE32(rdev)) {
3879 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3880 tmp = RREG32(DC_HPD5_INT_CONTROL);
3881 tmp |= DC_HPDx_INT_ACK;
3882 WREG32(DC_HPD5_INT_CONTROL, tmp);
3884 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3885 tmp = RREG32(DC_HPD6_INT_CONTROL);
3886 tmp |= DC_HPDx_INT_ACK;
3887 WREG32(DC_HPD6_INT_CONTROL, tmp);
3889 if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
3890 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
3891 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3892 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3894 if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
3895 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
3896 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3897 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3900 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3901 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
3902 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3903 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3905 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3906 if (ASIC_IS_DCE3(rdev)) {
3907 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
3908 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3909 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3911 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
3912 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3913 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3919 void r600_irq_disable(struct radeon_device *rdev)
3921 r600_disable_interrupts(rdev);
3922 /* Wait and acknowledge irq */
3925 r600_disable_interrupt_state(rdev);
3928 static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3932 if (rdev->wb.enabled)
3933 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
3935 wptr = RREG32(IH_RB_WPTR);
3937 if (wptr & RB_OVERFLOW) {
3938 /* When a ring buffer overflow happens, start parsing interrupts
3939 * from the last vector that was not overwritten (wptr + 16). Hopefully
3940 * this should allow us to catch up.
3942 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3943 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
3944 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3945 tmp = RREG32(IH_RB_CNTL);
3946 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3947 WREG32(IH_RB_CNTL, tmp);
3948 wptr &= ~RB_OVERFLOW;
3950 return (wptr & rdev->ih.ptr_mask);
/*
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 vblank
 *      1         1  D1 vline
 *      5         0  D2 vblank
 *      5         1  D2 vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;
	bool queue_thermal = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	/* No MSIs, need a dummy read to flush PCI DMAs */
	if (!rdev->msi_enabled)
		RREG32(IH_RB_WPTR);

	wptr = r600_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	r600_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[0]))
						radeon_crtc_handle_vblank(rdev, 0);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[1]))
						radeon_crtc_handle_vblank(rdev, 1);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 9: /* D1 pflip */
			DRM_DEBUG("IH: D1 flip\n");
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, 0);
			break;
		case 11: /* D2 pflip */
			DRM_DEBUG("IH: D2 flip\n");
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, 1);
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* hdmi */
			switch (src_data) {
			case 4:
				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI0\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI1\n");
				}
				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 124: /* UVD */
			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 230: /* thermal low to high */
			DRM_DEBUG("IH: thermal low to high\n");
			rdev->pm.dpm.thermal.high_to_low = false;
			queue_thermal = true;
			break;
		case 231: /* thermal high to low */
			DRM_DEBUG("IH: thermal high to low\n");
			rdev->pm.dpm.thermal.high_to_low = true;
			queue_thermal = true;
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (queue_hdmi)
		schedule_work(&rdev->audio_work);
	if (queue_thermal && rdev->pm.dpm_enabled)
		schedule_work(&rdev->pm.dpm.thermal.work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}
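
/* A minimal decode sketch (hypothetical helper, not part of the driver):
 * extracting src_id and src_data from one 16-byte IV ring entry, matching
 * the field layout documented above r600_irq_process().  byte_offset plays
 * the role of rptr; the ring is an array of little-endian dwords.
 */
static inline void r600_iv_decode_example(const __le32 *ring, u32 byte_offset,
					  u32 *src_id, u32 *src_data)
{
	u32 ring_index = byte_offset / 4;	/* dword index into the ring */

	*src_id = le32_to_cpu(ring[ring_index]) & 0xff;		   /* bits [7:0] */
	*src_data = le32_to_cpu(ring[ring_index + 1]) & 0xfffffff; /* bits [59:32] */
}
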
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}

/**
 * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
 * @rdev: radeon device structure
 *
 * Some R6XX/R7XX don't seem to take into account HDP flushes performed
 * through the ring buffer. This leads to corruption in rendering, see
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
 * directly perform the HDP flush by writing the register through MMIO.
 */
void r600_mmio_hdp_flush(struct radeon_device *rdev)
{
	/* r7xx hw bug.  Write to HDP_DEBUG1 followed by an fb read
	 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void __iomem *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl(ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}

void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	radeon_gui_idle(rdev);

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		/* not actually supported */
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	default:
		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
		return;
	}

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
	link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
	link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
			    R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);

	WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
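
/* Usage sketch (illustrative only, hypothetical helper): request an x8 link
 * and verify the retrain via r600_get_pcie_lanes() below.  Relies only on
 * the two functions defined here (both declared in radeon_asic.h).
 */
static inline bool r600_request_x8_lanes_example(struct radeon_device *rdev)
{
	r600_set_pcie_lanes(rdev, 8);		/* program X8 + RECONFIG_NOW */
	return r600_get_pcie_lanes(rdev) == 8;	/* read back negotiated width */
}
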
int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	radeon_gui_idle(rdev);

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12:
		/* not actually supported */
		return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
	    (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}

/**
 * r600_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (R6xx-cayman).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}
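
/* Usage sketch (illustrative only, hypothetical helper): measuring elapsed
 * GPU clocks over an interval by differencing two snapshots.  Assumes only
 * r600_get_gpu_clock_counter() above and udelay() from linux/delay.h.
 */
static inline uint64_t r600_gpu_clocks_elapsed_example(struct radeon_device *rdev)
{
	uint64_t start = r600_get_gpu_clock_counter(rdev);

	udelay(100);	/* sample window */
	return r600_get_gpu_clock_counter(rdev) - start;
}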