/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"

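/*
 * Default per-slot up/down time counts; r600-class dpm drivers program
 * these into the CG_FFCT_n registers via r600_set_tc().
 */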
const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
	R600_UTC_DFLT_00,
	R600_UTC_DFLT_01,
	R600_UTC_DFLT_02,
	R600_UTC_DFLT_03,
	R600_UTC_DFLT_04,
	R600_UTC_DFLT_05,
	R600_UTC_DFLT_06,
	R600_UTC_DFLT_07,
	R600_UTC_DFLT_08,
	R600_UTC_DFLT_09,
	R600_UTC_DFLT_10,
	R600_UTC_DFLT_11,
	R600_UTC_DFLT_12,
	R600_UTC_DFLT_13,
	R600_UTC_DFLT_14,
};

const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
	R600_DTC_DFLT_00,
	R600_DTC_DFLT_01,
	R600_DTC_DFLT_02,
	R600_DTC_DFLT_03,
	R600_DTC_DFLT_04,
	R600_DTC_DFLT_05,
	R600_DTC_DFLT_06,
	R600_DTC_DFLT_07,
	R600_DTC_DFLT_08,
	R600_DTC_DFLT_09,
	R600_DTC_DFLT_10,
	R600_DTC_DFLT_11,
	R600_DTC_DFLT_12,
	R600_DTC_DFLT_13,
	R600_DTC_DFLT_14,
};

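/*
 * Decode the ATOM PPLib classification bits of a power state into
 * human-readable form on the kernel log: first the exclusive "ui class",
 * then each internal classification flag that is set.
 */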
void r600_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("uvd_mvc ");
	}
	printk("\n");
}

void r600_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps: ");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		printk("video ");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		printk("no_dc ");
	printk("\n");
}

void r600_dpm_print_ps_status(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	printk("\tstatus: ");
	if (rps == rdev->pm.dpm.current_ps)
		printk("c ");
	if (rps == rdev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == rdev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}

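/*
 * Return the vblank duration of the first enabled CRTC, in microseconds.
 * line_time_us = crtc_htotal * 1000 / clock(kHz), and vblank spans
 * crtc_vblank_end - crtc_vdisplay (plus borders) lines.
 *
 * Worked example (hypothetical mode, for illustration only): 1080p with
 * htotal = 2200, pixel clock = 148500 kHz, vblank_end = 1125:
 *   line_time_us = 2200 * 1000 / 148500 ~= 14 us
 *   vblank_lines = 1125 - 1080 = 45
 *   vblank_time  ~= 45 * 14 = 630 us
 * With no active display the function returns 0xffffffff.
 */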
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 line_time_us, vblank_lines;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
			line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
				radeon_crtc->hw_mode.clock;
			vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
				radeon_crtc->hw_mode.crtc_vdisplay +
				(radeon_crtc->v_border * 2);
			vblank_time_us = vblank_lines * line_time_us;
			break;
		}
	}

	return vblank_time_us;
}

u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 vrefresh = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
			vrefresh = radeon_crtc->hw_mode.vrefresh;
			break;
		}
	}

	return vrefresh;
}

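/*
 * Derive the (p, u) fixed-point pair used by several PM register fields:
 * i_c = i * r_c / 100 is the scaled input, u grows with the number of
 * bits of i_c above the p_b bit position, and p = i_c >> (2 * u) shrinks
 * the value into its field. E.g. i_c = 0x5000 with p_b = 8 leaves 7 bits
 * above bit 8, so u = (7 + 1) / 2 = 4 and p = 0x5000 >> 8 = 0x50.
 */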
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			    u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}

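/*
 * Split the hysteresis window h around target t into an above-target
 * part ah and a below-target part al, weighted by k = 100 * fh / fl so
 * the switch points track the ratio of the high and low frequencies:
 * *th = t - ah and *tl = t + al. Returns -EINVAL for a bad fl/fh pair.
 */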
int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

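/*
 * Toggle dynamic gfx clock gating. On disable, request RLC handshake 0x2
 * and poll CG_RLC_REQ_AND_RSP until the response type reads 1, then force
 * GRBM power on (the RREG32 read-back posts the write).
 */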
void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	int i;

	if (enable) {
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	} else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

		WREG32(CG_RLC_REQ_AND_RSP, 0x2);

		for (i = 0; i < rdev->usec_timeout; i++) {
			if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
				break;
			udelay(1);
		}

		WREG32(CG_RLC_REQ_AND_RSP, 0x0);

		WREG32(GRBM_PWR_CNTL, 0x1);
		RREG32(GRBM_PWR_CNTL);
	}
}

void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	else
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}

void r600_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

bool r600_dynamicpm_enabled(struct radeon_device *rdev)
{
	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
		return true;
	else
		return false;
}

void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}

void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
	else
		WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}

void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
	else
		WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
}

void r600_wait_for_spll_change(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
			break;
		udelay(1);
	}
}

void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
{
	WREG32(CG_BSP, BSP(p) | BSU(u));
}

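/*
 * Program the activity thresholds for automatic state transitions:
 * CG_RT takes the low->medium and medium->high points, CG_LT the
 * high->medium and medium->low points.
 */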
void r600_set_at(struct radeon_device *rdev,
		 u32 l_to_m, u32 m_to_h,
		 u32 h_to_m, u32 m_to_l)
{
	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
}

void r600_set_tc(struct radeon_device *rdev,
		 u32 index, u32 u_t, u32 d_t)
{
	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
}

void r600_select_td(struct radeon_device *rdev,
		    enum r600_td td)
{
	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}

void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
	WREG32(CG_FTV, vrv);
}

void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}

void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}

void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}

void r600_set_sst(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}

void r600_set_git(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}

void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}

void r600_set_fct(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}

void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}

void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}

void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}

void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}

void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}

void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}

void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}

void r600_engine_clock_entry_enable(struct radeon_device *rdev,
				    u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_ENTRY_VALID);
}

void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
						   u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_STEP_ENABLE);
}

void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
						 u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_POST_DIV_EN);
}

void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
					      u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
}

void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
						   u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
}

void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
						  u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
}

void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
					   u32 index, u32 step_time)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
}

void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}

void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}

void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}

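/*
 * GPIO-based voltage control: the 64-bit pin mask spans the LOWER/UPPER
 * GPIO enable register pair, and each power level owns a slot in the
 * CTXSW_VID GPIO control registers programmed below.
 */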
void r600_voltage_control_enable_pins(struct radeon_device *rdev,
				      u64 mask)
{
	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
}

void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
{
	u32 tmp, mask;
	u32 ix = 3 - (3 & index);

	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
}

void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
						    u64 mask)
{
	u32 gpio;

	gpio = RREG32(GPIOPAD_MASK);
	gpio &= ~mask;
	WREG32(GPIOPAD_MASK, gpio);

	gpio = RREG32(GPIOPAD_EN);
	gpio &= ~mask;
	WREG32(GPIOPAD_EN, gpio);

	gpio = RREG32(GPIOPAD_A);
	gpio &= ~mask;
	WREG32(GPIOPAD_A, gpio);
}

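/*
 * The four CTXSW profile slots appear to be ordered from CTXSW down to
 * LOW in the register file, so ix = 3 - (3 & index) maps an
 * r600_power_level (LOW = 0 ... CTXSW = 3) onto its register slot.
 */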
void r600_power_level_enable(struct radeon_device *rdev,
			     enum r600_power_level index, bool enable)
{
	u32 ix = 3 - (3 & index);

	if (enable)
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
			 ~CTXSW_FREQ_STATE_ENABLE);
	else
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
			 ~CTXSW_FREQ_STATE_ENABLE);
}

void r600_power_level_set_voltage_index(struct radeon_device *rdev,
					enum r600_power_level index, u32 voltage_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 mem_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 eng_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_watermark_id(struct radeon_device *rdev,
				       enum r600_power_level index,
				       enum r600_display_watermark watermark_id)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
		tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
}

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
				    enum r600_power_level index, bool compatible)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (compatible)
		tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
}

enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
	tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
	return tmp;
}

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
	tmp >>= TARGET_PROFILE_INDEX_SHIFT;
	return tmp;
}

void r600_power_level_set_enter_index(struct radeon_device *rdev,
				      enum r600_power_level index)
{
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
		 ~DYN_PWR_ENTER_INDEX_MASK);
}

void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
				       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) != index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) != index)
			break;
		udelay(1);
	}
}

void r600_wait_for_power_level(struct radeon_device *rdev,
			       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) == index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) == index)
			break;
		udelay(1);
	}
}

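/*
 * Bring up dynamic PM: sclk/mclk control is parked while the global
 * enable is set, then the SPLL is bounced through bypass twice (waiting
 * on SPLL_CHG_STATUS each time) before clock control is handed back to
 * the PM hardware.
 */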
void r600_start_dpm(struct radeon_device *rdev)
{
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
}

void r600_stop_dpm(struct radeon_device *rdev)
{
	r600_dynamicpm_enable(rdev, false);
}

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	return 0;
}

void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{

}

bool r600_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;

	return false;
}

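/*
 * Clamp the requested trip points to the sensor's 0..255 degree C window
 * and program the thermal interrupt/DPM thresholds; the registers take
 * whole degrees C while rdev->pm.dpm.thermal caches millidegrees.
 */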
static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
					      int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

int r600_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
};

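/*
 * Copy an AtomBIOS clock/voltage dependency table into the driver's
 * native layout. The BIOS stores each clock as a packed 24-bit value
 * split across usClockLow and ucClockHigh.
 */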
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
					    ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct radeon_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	radeon_table->entries = kzalloc(size, GFP_KERNEL);
	if (!radeon_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	radeon_table->count = atom_table->ucNumEntries;

	return 0;
}

int r600_get_platform_caps(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22

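/*
 * Walk the PPLib power-play table and its versioned extended header,
 * copying whichever optional sub-tables (fan, clock/voltage
 * dependencies, CAC leakage, VCE/UVD/SAMU/ACP limits, PPM, PowerTune)
 * the table size and header size indicate are present. Any allocation
 * failure unwinds everything via r600_free_extended_power_table().
 */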
int r600_parse_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				rdev->pm.dpm.fan.t_max = 10900;
			rdev->pm.dpm.fan.cycle_delay = 100000;
			rdev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							       dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct radeon_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
		rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (rdev->pm.dpm.tdp_od_limit)
			rdev->pm.dpm.power_control = true;
		else
			rdev->pm.dpm.power_control = false;
		rdev->pm.dpm.tdp_adjustment = 0;
		rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
			rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct radeon_vce_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= RADEON_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				rdev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			rdev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.ppm_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			rdev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			rdev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
	}

	return 0;
}

void r600_free_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
}

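/*
 * Pick the PCIe gen to program: an explicit asic_gen wins; otherwise
 * fall back to the fastest speed allowed by both the system mask and
 * the requested default.
 */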
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
					       u32 sys_mask,
					       enum radeon_pcie_gen asic_gen,
					       enum radeon_pcie_gen default_gen)
{
	switch (asic_gen) {
	case RADEON_PCIE_GEN1:
		return RADEON_PCIE_GEN1;
	case RADEON_PCIE_GEN2:
		return RADEON_PCIE_GEN2;
	case RADEON_PCIE_GEN3:
		return RADEON_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
			return RADEON_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
			return RADEON_PCIE_GEN2;
		else
			return RADEON_PCIE_GEN1;
	}
	return RADEON_PCIE_GEN1;
}

u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
			       u16 asic_lanes,
			       u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

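/*
 * Map a lane count to the 3-bit encoding used by the link width
 * registers: 1->1, 2->2, 4->3, 8->4, 12->5, 16->6; anything else
 * encodes as 0.
 */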
u8 r600_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}