2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/delay.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
30 #include "amd_powerplay.h"
31 #include "vega10_smumgr.h"
32 #include "hardwaremanager.h"
33 #include "ppatomfwctrl.h"
34 #include "atomfirmware.h"
35 #include "cgs_common.h"
36 #include "vega10_powertune.h"
38 #include "smu9_driver_if.h"
39 #include "vega10_inc.h"
41 #include "pppcielanes.h"
42 #include "vega10_hwmgr.h"
43 #include "vega10_processpptables.h"
44 #include "vega10_pptable.h"
45 #include "vega10_thermal.h"
48 #include "amd_pcie_helpers.h"
49 #include "cgs_linux.h"
50 #include "ppinterrupt.h"
51 #include "pp_overdriver.h"
53 #define VOLTAGE_SCALE 4
54 #define VOLTAGE_VID_OFFSET_SCALE1 625
55 #define VOLTAGE_VID_OFFSET_SCALE2 100
57 #define HBM_MEMORY_CHANNEL_WIDTH 128
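/*
 * Maps the IntLvNumChan field of DF_CS_AON0_DramBaseAddress0 to the number
 * of HBM memory channels (e.g. a field value of 3 selects 4 channels);
 * used by vega10_populate_all_memory_levels() below.
 */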
59 uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
61 #define MEM_FREQ_LOW_LATENCY 25000
62 #define MEM_FREQ_HIGH_LATENCY 80000
63 #define MEM_LATENCY_HIGH 245
64 #define MEM_LATENCY_LOW 35
65 #define MEM_LATENCY_ERR 0xFFFF
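/*
 * Local copies of the Data Fabric DramBaseAddress0 register offset and
 * field definitions, used below to read the memory channel configuration.
 */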
67 #define mmDF_CS_AON0_DramBaseAddress0 0x0044
68 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
70 //DF_CS_AON0_DramBaseAddress0
71 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
72 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
73 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
74 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
75 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
76 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
77 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
78 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
79 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
80 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
82 const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
84 struct vega10_power_state *cast_phw_vega10_power_state(
85 struct pp_hw_power_state *hw_ps)
87 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
88 "Invalid Powerstate Type!",
91 return (struct vega10_power_state *)hw_ps;
94 const struct vega10_power_state *cast_const_phw_vega10_power_state(
95 const struct pp_hw_power_state *hw_ps)
97 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
98 "Invalid Powerstate Type!",
101 return (const struct vega10_power_state *)hw_ps;
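/*
 * Seed the registry defaults from hwmgr->feature_mask: each *_dpm_key_disabled
 * flag is the inverse of the corresponding PP_*_MASK bit, and the remaining
 * tuning knobs default to enabled.
 */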
104 static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
106 struct vega10_hwmgr *data =
107 (struct vega10_hwmgr *)(hwmgr->backend);
109 data->registry_data.sclk_dpm_key_disabled =
110 hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
111 data->registry_data.socclk_dpm_key_disabled =
112 hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
113 data->registry_data.mclk_dpm_key_disabled =
114 hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
115 data->registry_data.pcie_dpm_key_disabled =
116 hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
118 data->registry_data.dcefclk_dpm_key_disabled =
119 hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
121 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
122 data->registry_data.power_containment_support = 1;
123 data->registry_data.enable_pkg_pwr_tracking_feature = 1;
124 data->registry_data.enable_tdc_limit_feature = 1;
127 data->registry_data.clock_stretcher_support =
128 hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;
130 data->registry_data.ulv_support =
131 hwmgr->feature_mask & PP_ULV_MASK ? true : false;
133 data->registry_data.sclk_deep_sleep_support =
134 hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;
136 data->registry_data.disable_water_mark = 0;
138 data->registry_data.fan_control_support = 1;
139 data->registry_data.thermal_support = 1;
140 data->registry_data.fw_ctf_enabled = 1;
142 data->registry_data.avfs_support = 1;
143 data->registry_data.led_dpm_enabled = 1;
145 data->registry_data.vr0hot_enabled = 1;
146 data->registry_data.vr1hot_enabled = 1;
147 data->registry_data.regulator_hot_gpio_support = 1;
149 data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
150 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
151 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
152 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
153 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
154 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
155 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
156 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
157 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
158 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
159 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
160 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
161 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
163 data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
164 data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
165 data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
166 data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
169 static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
171 struct vega10_hwmgr *data =
172 (struct vega10_hwmgr *)(hwmgr->backend);
173 struct phm_ppt_v2_information *table_info =
174 (struct phm_ppt_v2_information *)hwmgr->pptable;
175 struct cgs_system_info sys_info = {0};
178 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
179 PHM_PlatformCaps_SclkDeepSleep);
181 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
182 PHM_PlatformCaps_DynamicPatchPowerState);
184 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
185 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
186 PHM_PlatformCaps_ControlVDDCI);
188 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
189 PHM_PlatformCaps_TablelessHardwareInterface);
191 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
192 PHM_PlatformCaps_EnableSMU7ThermalManagement);
194 sys_info.size = sizeof(struct cgs_system_info);
195 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
196 result = cgs_query_system_info(hwmgr->device, &sys_info);
198 if (!result && (sys_info.value & AMD_PG_SUPPORT_UVD))
199 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
200 PHM_PlatformCaps_UVDPowerGating);
202 if (!result && (sys_info.value & AMD_PG_SUPPORT_VCE))
203 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
204 PHM_PlatformCaps_VCEPowerGating);
206 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
207 PHM_PlatformCaps_UnTabledHardwareInterface);
209 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
210 PHM_PlatformCaps_FanSpeedInTableIsRPM);
212 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
213 PHM_PlatformCaps_ODFuzzyFanControlSupport);
215 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
216 PHM_PlatformCaps_DynamicPowerManagement);
218 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
219 PHM_PlatformCaps_SMC);
221 /* power tune caps */
222 /* assume disabled */
223 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
224 PHM_PlatformCaps_PowerContainment);
225 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
226 PHM_PlatformCaps_SQRamping);
227 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
228 PHM_PlatformCaps_DBRamping);
229 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
230 PHM_PlatformCaps_TDRamping);
231 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
232 PHM_PlatformCaps_TCPRamping);
234 if (data->registry_data.power_containment_support)
235 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
236 PHM_PlatformCaps_PowerContainment);
237 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
238 PHM_PlatformCaps_CAC);
240 if (table_info->tdp_table->usClockStretchAmount &&
241 data->registry_data.clock_stretcher_support)
242 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
243 PHM_PlatformCaps_ClockStretcher);
245 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
246 PHM_PlatformCaps_RegulatorHot);
247 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
248 PHM_PlatformCaps_AutomaticDCTransition);
250 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
251 PHM_PlatformCaps_UVDDPM);
252 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
253 PHM_PlatformCaps_VCEDPM);
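/*
 * Map each GNLD_* software feature to its SMU FEATURE_*_BIT and mark it as
 * supported according to the registry defaults and platform caps set above.
 */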
258 static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
260 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
263 vega10_initialize_power_tune_defaults(hwmgr);
265 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
266 data->smu_features[i].smu_feature_id = 0xffff;
267 data->smu_features[i].smu_feature_bitmap = 1 << i;
268 data->smu_features[i].enabled = false;
269 data->smu_features[i].supported = false;
272 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
273 FEATURE_DPM_PREFETCHER_BIT;
274 data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
275 FEATURE_DPM_GFXCLK_BIT;
276 data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
277 FEATURE_DPM_UCLK_BIT;
278 data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
279 FEATURE_DPM_SOCCLK_BIT;
280 data->smu_features[GNLD_DPM_UVD].smu_feature_id =
282 data->smu_features[GNLD_DPM_VCE].smu_feature_id =
284 data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
285 FEATURE_DPM_MP0CLK_BIT;
286 data->smu_features[GNLD_DPM_LINK].smu_feature_id =
287 FEATURE_DPM_LINK_BIT;
288 data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
289 FEATURE_DPM_DCEFCLK_BIT;
290 data->smu_features[GNLD_ULV].smu_feature_id =
292 data->smu_features[GNLD_AVFS].smu_feature_id =
294 data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
295 FEATURE_DS_GFXCLK_BIT;
296 data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
297 FEATURE_DS_SOCCLK_BIT;
298 data->smu_features[GNLD_DS_LCLK].smu_feature_id =
300 data->smu_features[GNLD_PPT].smu_feature_id =
302 data->smu_features[GNLD_TDC].smu_feature_id =
304 data->smu_features[GNLD_THERMAL].smu_feature_id =
306 data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
307 FEATURE_GFX_PER_CU_CG_BIT;
308 data->smu_features[GNLD_RM].smu_feature_id =
310 data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
311 FEATURE_DS_DCEFCLK_BIT;
312 data->smu_features[GNLD_ACDC].smu_feature_id =
314 data->smu_features[GNLD_VR0HOT].smu_feature_id =
316 data->smu_features[GNLD_VR1HOT].smu_feature_id =
318 data->smu_features[GNLD_FW_CTF].smu_feature_id =
320 data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
321 FEATURE_LED_DISPLAY_BIT;
322 data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
323 FEATURE_FAN_CONTROL_BIT;
324 data->smu_features[GNLD_VOLTAGE_CONTROLLER].smu_feature_id =
325 FEATURE_VOLTAGE_CONTROLLER_BIT;
327 if (!data->registry_data.prefetcher_dpm_key_disabled)
328 data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
330 if (!data->registry_data.sclk_dpm_key_disabled)
331 data->smu_features[GNLD_DPM_GFXCLK].supported = true;
333 if (!data->registry_data.mclk_dpm_key_disabled)
334 data->smu_features[GNLD_DPM_UCLK].supported = true;
336 if (!data->registry_data.socclk_dpm_key_disabled)
337 data->smu_features[GNLD_DPM_SOCCLK].supported = true;
339 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
340 PHM_PlatformCaps_UVDDPM))
341 data->smu_features[GNLD_DPM_UVD].supported = true;
343 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
344 PHM_PlatformCaps_VCEDPM))
345 data->smu_features[GNLD_DPM_VCE].supported = true;
347 if (!data->registry_data.pcie_dpm_key_disabled)
348 data->smu_features[GNLD_DPM_LINK].supported = true;
350 if (!data->registry_data.dcefclk_dpm_key_disabled)
351 data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
353 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
354 PHM_PlatformCaps_SclkDeepSleep) &&
355 data->registry_data.sclk_deep_sleep_support) {
356 data->smu_features[GNLD_DS_GFXCLK].supported = true;
357 data->smu_features[GNLD_DS_SOCCLK].supported = true;
358 data->smu_features[GNLD_DS_LCLK].supported = true;
359 data->smu_features[GNLD_DS_DCEFCLK].supported = true;
362 if (data->registry_data.enable_pkg_pwr_tracking_feature)
363 data->smu_features[GNLD_PPT].supported = true;
365 if (data->registry_data.enable_tdc_limit_feature)
366 data->smu_features[GNLD_TDC].supported = true;
368 if (data->registry_data.thermal_support)
369 data->smu_features[GNLD_THERMAL].supported = true;
371 if (data->registry_data.fan_control_support)
372 data->smu_features[GNLD_FAN_CONTROL].supported = true;
374 if (data->registry_data.fw_ctf_enabled)
375 data->smu_features[GNLD_FW_CTF].supported = true;
377 if (data->registry_data.avfs_support)
378 data->smu_features[GNLD_AVFS].supported = true;
380 if (data->registry_data.led_dpm_enabled)
381 data->smu_features[GNLD_LED_DISPLAY].supported = true;
383 if (data->registry_data.vr1hot_enabled)
384 data->smu_features[GNLD_VR1HOT].supported = true;
386 if (data->registry_data.vr0hot_enabled)
387 data->smu_features[GNLD_VR0HOT].supported = true;
391 #ifdef PPLIB_VEGA10_EVV_SUPPORT
392 static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
393 phm_ppt_v1_voltage_lookup_table *lookup_table,
394 uint16_t virtual_voltage_id, int32_t *socclk)
398 struct phm_ppt_v2_information *table_info =
399 (struct phm_ppt_v2_information *)(hwmgr->pptable);
401 PP_ASSERT_WITH_CODE(lookup_table->count != 0,
402 "Lookup table is empty",
405 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
406 for (entry_id = 0; entry_id < table_info->vdd_dep_on_socclk->count; entry_id++) {
407 voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
408 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
412 PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
413 "Can't find requested voltage id in vdd_dep_on_socclk table!",
416 *socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
421 #define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
423 * Get Leakage VDDC based on leakage ID.
425 * @param hwmgr the address of the powerplay hardware manager.
428 static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
430 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
435 struct phm_ppt_v2_information *table_info =
436 (struct phm_ppt_v2_information *)hwmgr->pptable;
437 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
438 table_info->vdd_dep_on_socclk;
441 for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
442 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
444 if (!vega10_get_socclk_for_voltage_evv(hwmgr,
445 table_info->vddc_lookup_table, vv_id, &sclk)) {
446 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
447 PHM_PlatformCaps_ClockStretcher)) {
448 for (j = 1; j < socclk_table->count; j++) {
449 if (socclk_table->entries[j].clk == sclk &&
450 socclk_table->entries[j].cks_enable == 0) {
457 PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
458 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
459 "Error retrieving EVV voltage value!",
463 /* need to make sure vddc is less than 2V, or else it could burn the ASIC. */
464 PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
465 "Invalid VDDC value", result = -EINVAL;);
467 /* the voltage should not be zero nor equal to leakage ID */
468 if (vddc != 0 && vddc != vv_id) {
469 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
470 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
471 data->vddc_leakage.count++;
480 * Change virtual leakage voltage to actual value.
482 * @param hwmgr the address of the powerplay hardware manager.
483 * @param voltage pointer to the voltage value to be patched
484 * @param leakage_table pointer to the leakage voltage table
486 static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
487 uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
491 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
492 for (index = 0; index < leakage_table->count; index++) {
493 /* if this voltage matches a leakage voltage ID */
494 /* patch with actual leakage voltage */
495 if (leakage_table->leakage_id[index] == *voltage) {
496 *voltage = leakage_table->actual_voltage[index];
501 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
502 pr_info("Voltage value looks like a Leakage ID "
503 "but it's not patched\n");
507 * Patch voltage lookup table by EVV leakages.
509 * @param hwmgr the address of the powerplay hardware manager.
510 * @param lookup_table pointer to the voltage lookup table
511 * @param leakage_table pointer to the leakage voltage table
514 static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
515 phm_ppt_v1_voltage_lookup_table *lookup_table,
516 struct vega10_leakage_voltage *leakage_table)
520 for (i = 0; i < lookup_table->count; i++)
521 vega10_patch_with_vdd_leakage(hwmgr,
522 &lookup_table->entries[i].us_vdd, leakage_table);
527 static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
528 struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
531 vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
537 static int vega10_patch_voltage_dependency_tables_with_lookup_table(
538 struct pp_hwmgr *hwmgr)
542 struct phm_ppt_v2_information *table_info =
543 (struct phm_ppt_v2_information *)(hwmgr->pptable);
544 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
545 table_info->vdd_dep_on_socclk;
546 struct phm_ppt_v1_clock_voltage_dependency_table *gfxclk_table =
547 table_info->vdd_dep_on_sclk;
548 struct phm_ppt_v1_clock_voltage_dependency_table *dcefclk_table =
549 table_info->vdd_dep_on_dcefclk;
550 struct phm_ppt_v1_clock_voltage_dependency_table *pixclk_table =
551 table_info->vdd_dep_on_pixclk;
552 struct phm_ppt_v1_clock_voltage_dependency_table *dspclk_table =
553 table_info->vdd_dep_on_dispclk;
554 struct phm_ppt_v1_clock_voltage_dependency_table *phyclk_table =
555 table_info->vdd_dep_on_phyclk;
556 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
557 table_info->vdd_dep_on_mclk;
558 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
559 table_info->mm_dep_table;
561 for (entry_id = 0; entry_id < socclk_table->count; entry_id++) {
562 voltage_id = socclk_table->entries[entry_id].vddInd;
563 socclk_table->entries[entry_id].vddc =
564 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
567 for (entry_id = 0; entry_id < gfxclk_table->count; entry_id++) {
568 voltage_id = gfxclk_table->entries[entry_id].vddInd;
569 gfxclk_table->entries[entry_id].vddc =
570 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
573 for (entry_id = 0; entry_id < dcefclk_table->count; entry_id++) {
574 voltage_id = dcefclk_table->entries[entry_id].vddInd;
575 dcefclk_table->entries[entry_id].vddc =
576 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
579 for (entry_id = 0; entry_id < pixclk_table->count; entry_id++) {
580 voltage_id = pixclk_table->entries[entry_id].vddInd;
581 pixclk_table->entries[entry_id].vddc =
582 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
585 for (entry_id = 0; entry_id < dspclk_table->count; entry_id++) {
586 voltage_id = dspclk_table->entries[entry_id].vddInd;
587 dspclk_table->entries[entry_id].vddc =
588 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
591 for (entry_id = 0; entry_id < phyclk_table->count; entry_id++) {
592 voltage_id = phyclk_table->entries[entry_id].vddInd;
593 phyclk_table->entries[entry_id].vddc =
594 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
597 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
598 voltage_id = mclk_table->entries[entry_id].vddInd;
599 mclk_table->entries[entry_id].vddc =
600 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
601 voltage_id = mclk_table->entries[entry_id].vddciInd;
602 mclk_table->entries[entry_id].vddci =
603 table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
604 voltage_id = mclk_table->entries[entry_id].mvddInd;
605 mclk_table->entries[entry_id].mvdd =
606 table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
609 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
610 voltage_id = mm_table->entries[entry_id].vddcInd;
611 mm_table->entries[entry_id].vddc =
612 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
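/* Sort the voltage lookup table into ascending us_vdd order (insertion sort). */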
619 static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
620 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
622 uint32_t table_size, i, j;
623 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
625 PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
626 "Lookup table is empty", return -EINVAL);
628 table_size = lookup_table->count;
630 /* Sorting voltages */
631 for (i = 0; i < table_size - 1; i++) {
632 for (j = i + 1; j > 0; j--) {
633 if (lookup_table->entries[j].us_vdd <
634 lookup_table->entries[j - 1].us_vdd) {
635 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
636 lookup_table->entries[j - 1] = lookup_table->entries[j];
637 lookup_table->entries[j] = tmp_voltage_lookup_record;
645 static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
649 struct phm_ppt_v2_information *table_info =
650 (struct phm_ppt_v2_information *)(hwmgr->pptable);
651 #ifdef PPLIB_VEGA10_EVV_SUPPORT
652 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
654 tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
655 table_info->vddc_lookup_table, &(data->vddc_leakage));
659 tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
660 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
665 tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
669 tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
676 static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
678 struct phm_ppt_v2_information *table_info =
679 (struct phm_ppt_v2_information *)(hwmgr->pptable);
680 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
681 table_info->vdd_dep_on_socclk;
682 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
683 table_info->vdd_dep_on_mclk;
685 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
686 "VDD dependency on SCLK table is missing. "
687 "This table is mandatory", return -EINVAL);
688 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
689 "VDD dependency on SCLK table is empty. "
690 "This table is mandatory", return -EINVAL);
692 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
693 "VDD dependency on MCLK table is missing. "
694 "This table is mandatory", return -EINVAL);
695 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
696 "VDD dependency on MCLK table is empty. "
697 "This table is mandatory", return -EINVAL);
699 table_info->max_clock_voltage_on_ac.sclk =
700 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
701 table_info->max_clock_voltage_on_ac.mclk =
702 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
703 table_info->max_clock_voltage_on_ac.vddc =
704 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
705 table_info->max_clock_voltage_on_ac.vddci =
706 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
708 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
709 table_info->max_clock_voltage_on_ac.sclk;
710 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
711 table_info->max_clock_voltage_on_ac.mclk;
712 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
713 table_info->max_clock_voltage_on_ac.vddc;
714 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
715 table_info->max_clock_voltage_on_ac.vddci;
720 static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
722 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
723 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
725 kfree(hwmgr->backend);
726 hwmgr->backend = NULL;
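/*
 * Allocate the vega10 backend, detect how VDDC/MVDD/VDDCI are controlled
 * (SVID2 or GPIO) and record their telemetry settings, then derive the
 * default platform caps, DPM defaults and fan settings from the pptable.
 */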
731 static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
734 struct vega10_hwmgr *data;
735 uint32_t config_telemetry = 0;
736 struct pp_atomfwctrl_voltage_table vol_table;
737 struct cgs_system_info sys_info = {0};
739 data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
743 hwmgr->backend = data;
745 vega10_set_default_registry_data(hwmgr);
747 data->disable_dpm_mask = 0xff;
748 data->workload_mask = 0xff;
750 /* need to set voltage control types before EVV patching */
751 data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
752 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
753 data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;
756 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
757 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
758 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
759 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
761 config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
762 (vol_table.telemetry_offset & 0xff);
763 data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
766 kfree(hwmgr->backend);
767 hwmgr->backend = NULL;
768 PP_ASSERT_WITH_CODE(false,
769 "VDDCR_SOC is not SVID2!",
774 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
775 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
776 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
777 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
780 ((vol_table.telemetry_slope << 24) & 0xff000000) |
781 ((vol_table.telemetry_offset << 16) & 0xff0000);
782 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
787 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
788 PHM_PlatformCaps_ControlVDDCI)) {
789 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
790 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
791 data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
794 data->config_telemetry = config_telemetry;
796 vega10_set_features_platform_caps(hwmgr);
798 vega10_init_dpm_defaults(hwmgr);
800 #ifdef PPLIB_VEGA10_EVV_SUPPORT
801 /* Get leakage voltage based on leakage ID. */
802 PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
803 "Get EVV Voltage Failed. Abort Driver loading!",
807 /* Patch our voltage dependency table with actual leakage voltage
808 * We need to perform leakage translation before it's used by other functions
810 vega10_complete_dependency_tables(hwmgr);
812 /* Parse pptable data read from VBIOS */
813 vega10_set_private_data_based_on_pptable(hwmgr);
815 data->is_tlu_enabled = false;
817 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
818 VEGA10_MAX_HARDWARE_POWERLEVELS;
819 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
820 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
822 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
823 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
824 hwmgr->platform_descriptor.clockStep.engineClock = 500;
825 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
827 sys_info.size = sizeof(struct cgs_system_info);
828 sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
829 result = cgs_query_system_info(hwmgr->device, &sys_info);
830 data->total_active_cus = sys_info.value;
831 /* Setup default Overdrive Fan control settings */
832 data->odn_fan_table.target_fan_speed =
833 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
834 data->odn_fan_table.target_temperature =
835 hwmgr->thermal_controller.
836 advanceFanControlParameters.ucTargetTemperature;
837 data->odn_fan_table.min_performance_clock =
838 hwmgr->thermal_controller.advanceFanControlParameters.
839 ulMinFanSCLKAcousticLimit;
840 data->odn_fan_table.min_fan_limit =
841 hwmgr->thermal_controller.
842 advanceFanControlParameters.usFanPWMMinLimit *
843 hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
848 static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
850 struct vega10_hwmgr *data =
851 (struct vega10_hwmgr *)(hwmgr->backend);
853 data->low_sclk_interrupt_threshold = 0;
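/*
 * Read the LED DPM GPIO LUT from the VBIOS and pack the first three pin
 * numbers it describes into LedPin0..LedPin2 of the SMC pptable.
 */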
858 static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
860 struct vega10_hwmgr *data =
861 (struct vega10_hwmgr *)(hwmgr->backend);
862 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
864 struct pp_atomfwctrl_voltage_table table;
870 ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
871 VOLTAGE_OBJ_GPIO_LUT, &table);
874 tmp = table.mask_low;
875 for (i = 0, j = 0; i < 32; i++) {
877 mask |= (uint32_t)(i << (8 * j));
885 pp_table->LedPin0 = (uint8_t)(mask & 0xff);
886 pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
887 pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
891 static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
893 PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
894 "Failed to init sclk threshold!",
897 PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
898 "Failed to set up led dpm config!",
904 static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
906 uint32_t features_enabled;
908 if (!vega10_get_smc_features(hwmgr->smumgr, &features_enabled)) {
909 if (features_enabled & SMC_DPM_FEATURES)
916 * Remove repeated voltage values and create table with unique values.
918 * @param hwmgr the address of the powerplay hardware manager.
919 * @param vol_table pointer to the voltage table to be trimmed
920 * @return 0 on success
923 static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
924 struct pp_atomfwctrl_voltage_table *vol_table)
929 struct pp_atomfwctrl_voltage_table *table;
931 PP_ASSERT_WITH_CODE(vol_table,
932 "Voltage Table empty.", return -EINVAL);
933 table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
939 table->mask_low = vol_table->mask_low;
940 table->phase_delay = vol_table->phase_delay;
942 for (i = 0; i < vol_table->count; i++) {
943 vvalue = vol_table->entries[i].value;
946 for (j = 0; j < table->count; j++) {
947 if (vvalue == table->entries[j].value) {
954 table->entries[table->count].value = vvalue;
955 table->entries[table->count].smio_low =
956 vol_table->entries[i].smio_low;
961 memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
967 static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
968 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
969 struct pp_atomfwctrl_voltage_table *vol_table)
973 PP_ASSERT_WITH_CODE(dep_table->count,
974 "Voltage Dependency Table empty.",
977 vol_table->mask_low = 0;
978 vol_table->phase_delay = 0;
979 vol_table->count = dep_table->count;
981 for (i = 0; i < vol_table->count; i++) {
982 vol_table->entries[i].value = dep_table->entries[i].mvdd;
983 vol_table->entries[i].smio_low = 0;
986 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
988 "Failed to trim MVDD Table!",
994 static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
995 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
996 struct pp_atomfwctrl_voltage_table *vol_table)
1000 PP_ASSERT_WITH_CODE(dep_table->count,
1001 "Voltage Dependency Table empty.",
1004 vol_table->mask_low = 0;
1005 vol_table->phase_delay = 0;
1006 vol_table->count = dep_table->count;
1008 for (i = 0; i < dep_table->count; i++) {
1009 vol_table->entries[i].value = dep_table->entries[i].vddci;
1010 vol_table->entries[i].smio_low = 0;
1013 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
1014 "Failed to trim VDDCI table.",
1020 static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1021 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1022 struct pp_atomfwctrl_voltage_table *vol_table)
1026 PP_ASSERT_WITH_CODE(dep_table->count,
1027 "Voltage Dependency Table empty.",
1030 vol_table->mask_low = 0;
1031 vol_table->phase_delay = 0;
1032 vol_table->count = dep_table->count;
1034 for (i = 0; i < vol_table->count; i++) {
1035 vol_table->entries[i].value = dep_table->entries[i].vddc;
1036 vol_table->entries[i].smio_low = 0;
1042 /* ---- Voltage Tables ----
1043 * If the voltage table would be bigger than
1044 * what will fit into the state table on
1045 * the SMC keep only the higher entries.
1047 static void vega10_trim_voltage_table_to_fit_state_table(
1048 struct pp_hwmgr *hwmgr,
1049 uint32_t max_vol_steps,
1050 struct pp_atomfwctrl_voltage_table *vol_table)
1052 unsigned int i, diff;
1054 if (vol_table->count <= max_vol_steps)
1057 diff = vol_table->count - max_vol_steps;
1059 for (i = 0; i < max_vol_steps; i++)
1060 vol_table->entries[i] = vol_table->entries[i + diff];
1062 vol_table->count = max_vol_steps;
1066 * Create Voltage Tables.
1068 * @param hwmgr the address of the powerplay hardware manager.
1071 static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1073 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
1074 struct phm_ppt_v2_information *table_info =
1075 (struct phm_ppt_v2_information *)hwmgr->pptable;
1078 if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1079 data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1080 result = vega10_get_mvdd_voltage_table(hwmgr,
1081 table_info->vdd_dep_on_mclk,
1082 &(data->mvdd_voltage_table));
1083 PP_ASSERT_WITH_CODE(!result,
1084 "Failed to retrieve MVDDC table!",
1088 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1089 result = vega10_get_vddci_voltage_table(hwmgr,
1090 table_info->vdd_dep_on_mclk,
1091 &(data->vddci_voltage_table));
1092 PP_ASSERT_WITH_CODE(!result,
1093 "Failed to retrieve VDDCI_MEM table!",
1097 if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1098 data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1099 result = vega10_get_vdd_voltage_table(hwmgr,
1100 table_info->vdd_dep_on_sclk,
1101 &(data->vddc_voltage_table));
1102 PP_ASSERT_WITH_CODE(!result,
1103 "Failed to retrieve VDDCR_SOC table!",
1107 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
1108 "Too many voltage values for VDDC. Trimming to fit state table.",
1109 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1110 16, &(data->vddc_voltage_table)));
1112 PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
1113 "Too many voltage values for VDDCI. Trimming to fit state table.",
1114 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1115 16, &(data->vddci_voltage_table)));
1117 PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
1118 "Too many voltage values for MVDD. Trimming to fit state table.",
1119 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1120 16, &(data->mvdd_voltage_table)));
1127 * @fn vega10_init_dpm_state
1128 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1130 * @param dpm_state - the address of the DPM state to initialize.
1133 static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1135 dpm_state->soft_min_level = 0xff;
1136 dpm_state->soft_max_level = 0xff;
1137 dpm_state->hard_min_level = 0xff;
1138 dpm_state->hard_max_level = 0xff;
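/*
 * Build a single DPM table from a clock/voltage dependency table, keeping only
 * entries whose clock is not lower than the previously accepted level.
 */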
1141 static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1142 struct vega10_single_dpm_table *dpm_table,
1143 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1147 for (i = 0; i < dep_table->count; i++) {
1148 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
1149 dep_table->entries[i].clk) {
1150 dpm_table->dpm_levels[dpm_table->count].value =
1151 dep_table->entries[i].clk;
1152 dpm_table->dpm_levels[dpm_table->count].enabled = true;
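/*
 * Fill the PCIe link table from the VBIOS PCIe table, honouring any gen
 * speed, lane width or link clock overrides requested via the registry data.
 */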
1157 static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1159 struct vega10_hwmgr *data =
1160 (struct vega10_hwmgr *)(hwmgr->backend);
1161 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
1162 struct phm_ppt_v2_information *table_info =
1163 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1164 struct phm_ppt_v1_pcie_table *bios_pcie_table =
1165 table_info->pcie_table;
1168 PP_ASSERT_WITH_CODE(bios_pcie_table->count,
1169 "Incorrect number of PCIE States from VBIOS!",
1172 for (i = 0; i < NUM_LINK_LEVELS; i++) {
1173 if (data->registry_data.pcieSpeedOverride)
1174 pcie_table->pcie_gen[i] =
1175 data->registry_data.pcieSpeedOverride;
1177 pcie_table->pcie_gen[i] =
1178 bios_pcie_table->entries[i].gen_speed;
1180 if (data->registry_data.pcieLaneOverride)
1181 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1182 data->registry_data.pcieLaneOverride);
1184 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1185 bios_pcie_table->entries[i].lane_width);
1186 if (data->registry_data.pcieClockOverride)
1187 pcie_table->lclk[i] =
1188 data->registry_data.pcieClockOverride;
1190 pcie_table->lclk[i] =
1191 bios_pcie_table->entries[i].pcie_sclk;
1194 pcie_table->count = NUM_LINK_LEVELS;
1200 * This function initializes all DPM state tables for the SMU
1201 * based on the dependency tables.
1202 * The dynamic state patching function will then trim these
1203 * state tables to the allowed range based
1204 * on the power policy or external client requests,
1205 * such as UVD requests, etc.
1207 static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1209 struct vega10_hwmgr *data =
1210 (struct vega10_hwmgr *)(hwmgr->backend);
1211 struct phm_ppt_v2_information *table_info =
1212 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1213 struct vega10_single_dpm_table *dpm_table;
1216 struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
1217 table_info->vdd_dep_on_socclk;
1218 struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
1219 table_info->vdd_dep_on_sclk;
1220 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1221 table_info->vdd_dep_on_mclk;
1222 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
1223 table_info->mm_dep_table;
1224 struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
1225 table_info->vdd_dep_on_dcefclk;
1226 struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
1227 table_info->vdd_dep_on_pixclk;
1228 struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
1229 table_info->vdd_dep_on_dispclk;
1230 struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
1231 table_info->vdd_dep_on_phyclk;
1233 PP_ASSERT_WITH_CODE(dep_soc_table,
1234 "SOCCLK dependency table is missing. This table is mandatory",
1236 PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
1237 "SOCCLK dependency table is empty. This table is mandatory",
1240 PP_ASSERT_WITH_CODE(dep_gfx_table,
1241 "GFXCLK dependency table is missing. This table is mandatory",
1243 PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
1244 "GFXCLK dependency table is empty. This table is mandatory",
1247 PP_ASSERT_WITH_CODE(dep_mclk_table,
1248 "MCLK dependency table is missing. This table is mandatory",
1250 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1251 "MCLK dependency table is empty. This table is mandatory",
1254 /* Initialize Sclk DPM table based on allow Sclk values */
1255 data->dpm_table.soc_table.count = 0;
1256 data->dpm_table.gfx_table.count = 0;
1257 data->dpm_table.dcef_table.count = 0;
1259 dpm_table = &(data->dpm_table.soc_table);
1260 vega10_setup_default_single_dpm_table(hwmgr,
1264 vega10_init_dpm_state(&(dpm_table->dpm_state));
1266 dpm_table = &(data->dpm_table.gfx_table);
1267 vega10_setup_default_single_dpm_table(hwmgr,
1270 vega10_init_dpm_state(&(dpm_table->dpm_state));
1272 /* Initialize Mclk DPM table based on allow Mclk values */
1273 data->dpm_table.mem_table.count = 0;
1274 dpm_table = &(data->dpm_table.mem_table);
1275 vega10_setup_default_single_dpm_table(hwmgr,
1278 vega10_init_dpm_state(&(dpm_table->dpm_state));
1280 data->dpm_table.eclk_table.count = 0;
1281 dpm_table = &(data->dpm_table.eclk_table);
1282 for (i = 0; i < dep_mm_table->count; i++) {
1283 if (i == 0 || dpm_table->dpm_levels
1284 [dpm_table->count - 1].value <=
1285 dep_mm_table->entries[i].eclk) {
1286 dpm_table->dpm_levels[dpm_table->count].value =
1287 dep_mm_table->entries[i].eclk;
1288 dpm_table->dpm_levels[dpm_table->count].enabled =
1289 (i == 0) ? true : false;
1293 vega10_init_dpm_state(&(dpm_table->dpm_state));
1295 data->dpm_table.vclk_table.count = 0;
1296 data->dpm_table.dclk_table.count = 0;
1297 dpm_table = &(data->dpm_table.vclk_table);
1298 for (i = 0; i < dep_mm_table->count; i++) {
1299 if (i == 0 || dpm_table->dpm_levels
1300 [dpm_table->count - 1].value <=
1301 dep_mm_table->entries[i].vclk) {
1302 dpm_table->dpm_levels[dpm_table->count].value =
1303 dep_mm_table->entries[i].vclk;
1304 dpm_table->dpm_levels[dpm_table->count].enabled =
1305 (i == 0) ? true : false;
1309 vega10_init_dpm_state(&(dpm_table->dpm_state));
1311 dpm_table = &(data->dpm_table.dclk_table);
1312 for (i = 0; i < dep_mm_table->count; i++) {
1313 if (i == 0 || dpm_table->dpm_levels
1314 [dpm_table->count - 1].value <=
1315 dep_mm_table->entries[i].dclk) {
1316 dpm_table->dpm_levels[dpm_table->count].value =
1317 dep_mm_table->entries[i].dclk;
1318 dpm_table->dpm_levels[dpm_table->count].enabled =
1319 (i == 0) ? true : false;
1323 vega10_init_dpm_state(&(dpm_table->dpm_state));
1325 /* Assume there is no headless Vega10 for now */
1326 dpm_table = &(data->dpm_table.dcef_table);
1327 vega10_setup_default_single_dpm_table(hwmgr,
1331 vega10_init_dpm_state(&(dpm_table->dpm_state));
1333 dpm_table = &(data->dpm_table.pixel_table);
1334 vega10_setup_default_single_dpm_table(hwmgr,
1338 vega10_init_dpm_state(&(dpm_table->dpm_state));
1340 dpm_table = &(data->dpm_table.display_table);
1341 vega10_setup_default_single_dpm_table(hwmgr,
1345 vega10_init_dpm_state(&(dpm_table->dpm_state));
1347 dpm_table = &(data->dpm_table.phy_table);
1348 vega10_setup_default_single_dpm_table(hwmgr,
1352 vega10_init_dpm_state(&(dpm_table->dpm_state));
1354 vega10_setup_default_pcie_table(hwmgr);
1356 /* save a copy of the default DPM table */
1357 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1358 sizeof(struct vega10_dpm_table));
1360 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1361 PHM_PlatformCaps_ODNinACSupport) ||
1362 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1363 PHM_PlatformCaps_ODNinDCSupport)) {
1364 data->odn_dpm_table.odn_core_clock_dpm_levels.
1365 number_of_performance_levels = data->dpm_table.gfx_table.count;
1366 for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
1367 data->odn_dpm_table.odn_core_clock_dpm_levels.
1368 performance_level_entries[i].clock =
1369 data->dpm_table.gfx_table.dpm_levels[i].value;
1370 data->odn_dpm_table.odn_core_clock_dpm_levels.
1371 performance_level_entries[i].enabled = true;
1374 data->odn_dpm_table.vdd_dependency_on_sclk.count =
1375 dep_gfx_table->count;
1376 for (i = 0; i < dep_gfx_table->count; i++) {
1377 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk =
1378 dep_gfx_table->entries[i].clk;
1379 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd =
1380 dep_gfx_table->entries[i].vddInd;
1381 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable =
1382 dep_gfx_table->entries[i].cks_enable;
1383 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset =
1384 dep_gfx_table->entries[i].cks_voffset;
1387 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1388 number_of_performance_levels = data->dpm_table.mem_table.count;
1389 for (i = 0; i < data->dpm_table.mem_table.count; i++) {
1390 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1391 performance_level_entries[i].clock =
1392 data->dpm_table.mem_table.dpm_levels[i].value;
1393 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1394 performance_level_entries[i].enabled = true;
1397 data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
1398 for (i = 0; i < dep_mclk_table->count; i++) {
1399 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk =
1400 dep_mclk_table->entries[i].clk;
1401 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd =
1402 dep_mclk_table->entries[i].vddInd;
1403 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci =
1404 dep_mclk_table->entries[i].vddci;
1412 * @fn vega10_populate_ulv_state
1413 * @brief Function to provide parameters for Ultra Low Voltage state to SMC.
1415 * @param hwmgr - the address of the hardware manager.
1418 static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
1420 struct vega10_hwmgr *data =
1421 (struct vega10_hwmgr *)(hwmgr->backend);
1422 struct phm_ppt_v2_information *table_info =
1423 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1425 data->smc_state_table.pp_table.UlvOffsetVid =
1426 (uint8_t)table_info->us_ulv_voltage_offset;
1428 data->smc_state_table.pp_table.UlvSmnclkDid =
1429 (uint8_t)(table_info->us_ulv_smnclk_did);
1430 data->smc_state_table.pp_table.UlvMp1clkDid =
1431 (uint8_t)(table_info->us_ulv_mp1clk_did);
1432 data->smc_state_table.pp_table.UlvGfxclkBypass =
1433 (uint8_t)(table_info->us_ulv_gfxclk_bypass);
1434 data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
1435 (uint8_t)(data->vddc_voltage_table.psi0_enable);
1436 data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
1437 (uint8_t)(data->vddc_voltage_table.psi1_enable);
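/* Query the VBIOS PLL dividers for the given LCLK and return its divider ID. */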
1442 static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
1443 uint32_t lclock, uint8_t *curr_lclk_did)
1445 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1447 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1449 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1451 "Failed to get LCLK clock settings from VBIOS!",
1454 *curr_lclk_did = dividers.ulDid;
1459 static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
1462 struct vega10_hwmgr *data =
1463 (struct vega10_hwmgr *)(hwmgr->backend);
1464 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1465 struct vega10_pcie_table *pcie_table =
1466 &(data->dpm_table.pcie_table);
1469 for (i = 0; i < pcie_table->count; i++) {
1470 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
1471 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
1473 result = vega10_populate_single_lclk_level(hwmgr,
1474 pcie_table->lclk[i], &(pp_table->LclkDid[i]));
1476 pr_info("Populate LClock Level %d Failed!\n", i);
1482 while (i < NUM_LINK_LEVELS) {
1483 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
1484 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
1486 result = vega10_populate_single_lclk_level(hwmgr,
1487 pcie_table->lclk[j], &(pp_table->LclkDid[i]));
1489 pr_info("Populate LClock Level %d Failed!\n", i);
1499 * Populates single SMC GFXCLK structure using the provided engine clock
1501 * @param hwmgr the address of the hardware manager
1502 * @param gfx_clock the GFX clock to use to populate the structure.
1503 * @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
1506 static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
1507 uint32_t gfx_clock, PllSetting_t *current_gfxclk_level)
1509 struct phm_ppt_v2_information *table_info =
1510 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1511 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk =
1512 table_info->vdd_dep_on_sclk;
1513 struct vega10_hwmgr *data =
1514 (struct vega10_hwmgr *)(hwmgr->backend);
1515 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1516 uint32_t gfx_max_clock =
1517 hwmgr->platform_descriptor.overdriveLimit.engineClock;
1520 if (data->apply_overdrive_next_settings_mask &
1521 DPMTABLE_OD_UPDATE_VDDC)
1522 dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1523 &(data->odn_dpm_table.vdd_dependency_on_sclk);
1525 PP_ASSERT_WITH_CODE(dep_on_sclk,
1526 "Invalid SOC_VDD-GFX_CLK Dependency Table!",
1529 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
1530 gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock;
1532 for (i = 0; i < dep_on_sclk->count; i++) {
1533 if (dep_on_sclk->entries[i].clk == gfx_clock)
1536 PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
1537 "Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
1541 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1542 COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
1543 gfx_clock, &dividers),
1544 "Failed to get GFX Clock settings from VBIOS!",
1547 /* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
1548 current_gfxclk_level->FbMult =
1549 cpu_to_le32(dividers.ulPll_fb_mult);
1550 /* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
1551 current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
1552 current_gfxclk_level->SsFbMult =
1553 cpu_to_le32(dividers.ulPll_ss_fbsmult);
1554 current_gfxclk_level->SsSlewFrac =
1555 cpu_to_le16(dividers.usPll_ss_slew_frac);
1556 current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
1562 * @brief Populates single SMC SOCCLK structure using the provided clock.
1564 * @param hwmgr - the address of the hardware manager.
1565 * @param soc_clock - the SOC clock to use to populate the structure.
1566 * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
1567 * @return 0 on success.
1569 static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1570 uint32_t soc_clock, uint8_t *current_soc_did,
1571 uint8_t *current_vol_index)
1573 struct phm_ppt_v2_information *table_info =
1574 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1575 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc =
1576 table_info->vdd_dep_on_socclk;
1577 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1580 PP_ASSERT_WITH_CODE(dep_on_soc,
1581 "Invalid SOC_VDD-SOC_CLK Dependency Table!",
1583 for (i = 0; i < dep_on_soc->count; i++) {
1584 if (dep_on_soc->entries[i].clk == soc_clock)
1587 PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
1588 "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1590 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1591 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1592 soc_clock, &dividers),
1593 "Failed to get SOC Clock settings from VBIOS!",
1596 *current_soc_did = (uint8_t)dividers.ulDid;
1597 *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
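/*
 * Linear search of the dependency table for the given clock; returns the
 * matching VDDC, logging a message if the clock cannot be found.
 */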
1602 uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr,
1604 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1608 for (i = 0; i < dep_table->count; i++) {
1609 if (dep_table->entries[i].clk == clk)
1610 return dep_table->entries[i].vddc;
1613 pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!");
1618 * Populates all SMC GFXCLK and SOCCLK levels based on the trimmed allowed dpm engine clock states
1620 * @param hwmgr the address of the hardware manager
1622 static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1624 struct vega10_hwmgr *data =
1625 (struct vega10_hwmgr *)(hwmgr->backend);
1626 struct phm_ppt_v2_information *table_info =
1627 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1628 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
1629 table_info->vdd_dep_on_socclk;
1630 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1631 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
1635 for (i = 0; i < dpm_table->count; i++) {
1636 result = vega10_populate_single_gfx_level(hwmgr,
1637 dpm_table->dpm_levels[i].value,
1638 &(pp_table->GfxclkLevel[i]));
1644 while (i < NUM_GFXCLK_DPM_LEVELS) {
1645 result = vega10_populate_single_gfx_level(hwmgr,
1646 dpm_table->dpm_levels[j].value,
1647 &(pp_table->GfxclkLevel[i]));
1653 pp_table->GfxclkSlewRate =
1654 cpu_to_le16(table_info->us_gfxclk_slew_rate);
1656 dpm_table = &(data->dpm_table.soc_table);
1657 for (i = 0; i < dpm_table->count; i++) {
1658 pp_table->SocVid[i] =
1659 (uint8_t)convert_to_vid(
1660 vega10_locate_vddc_given_clock(hwmgr,
1661 dpm_table->dpm_levels[i].value,
1663 result = vega10_populate_single_soc_level(hwmgr,
1664 dpm_table->dpm_levels[i].value,
1665 &(pp_table->SocclkDid[i]),
1666 &(pp_table->SocDpmVoltageIndex[i]));
1672 while (i < NUM_SOCCLK_DPM_LEVELS) {
1673 pp_table->SocVid[i] = pp_table->SocVid[j];
1674 result = vega10_populate_single_soc_level(hwmgr,
1675 dpm_table->dpm_levels[j].value,
1676 &(pp_table->SocclkDid[i]),
1677 &(pp_table->SocDpmVoltageIndex[i]));
1687 * @brief Populates a single SMC memory (UCLK) level using the provided memory clock.
1689 * @param hwmgr - the address of the hardware manager.
1690 * @param mem_clock - the memory clock to use to populate the structure.
1691 * @return 0 on success.
1693 static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1694 uint32_t mem_clock, uint8_t *current_mem_vid,
1695 PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
1697 struct vega10_hwmgr *data =
1698 (struct vega10_hwmgr *)(hwmgr->backend);
1699 struct phm_ppt_v2_information *table_info =
1700 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1701 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk =
1702 table_info->vdd_dep_on_mclk;
1703 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1704 uint32_t mem_max_clock =
1705 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
1708 if (data->apply_overdrive_next_settings_mask &
1709 DPMTABLE_OD_UPDATE_VDDC)
1710 dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1711 &data->odn_dpm_table.vdd_dependency_on_mclk;
1713 PP_ASSERT_WITH_CODE(dep_on_mclk,
1714 "Invalid SOC_VDD-UCLK Dependency Table!",
1717 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
1718 mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
1720 for (i = 0; i < dep_on_mclk->count; i++) {
1721 if (dep_on_mclk->entries[i].clk == mem_clock)
1724 PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
1725 "Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
1729 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1730 hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
1731 "Failed to get UCLK settings from VBIOS!",
1735 (uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
1736 *current_mem_soc_vind =
1737 (uint8_t)(dep_on_mclk->entries[i].vddInd);
1738 current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
1739 current_memclk_level->Did = (uint8_t)(dividers.ulDid);
1741 PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
1742 "Invalid Divider ID!",
1749 * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
1751 * @param hwmgr - the address of the hardware manager.
1752 * @return 0 on success.
1754 static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1756 struct vega10_hwmgr *data =
1757 (struct vega10_hwmgr *)(hwmgr->backend);
1758 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1759 struct vega10_single_dpm_table *dpm_table =
1760 &(data->dpm_table.mem_table);
1762 uint32_t i, j, reg, mem_channels;
1764 for (i = 0; i < dpm_table->count; i++) {
1765 result = vega10_populate_single_memory_level(hwmgr,
1766 dpm_table->dpm_levels[i].value,
1767 &(pp_table->MemVid[i]),
1768 &(pp_table->UclkLevel[i]),
1769 &(pp_table->MemSocVoltageIndex[i]));
1775 while (i < NUM_UCLK_DPM_LEVELS) {
1776 result = vega10_populate_single_memory_level(hwmgr,
1777 dpm_table->dpm_levels[j].value,
1778 &(pp_table->MemVid[i]),
1779 &(pp_table->UclkLevel[i]),
1780 &(pp_table->MemSocVoltageIndex[i]));
1786 reg = soc15_get_register_offset(DF_HWID, 0,
1787 mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
1788 mmDF_CS_AON0_DramBaseAddress0);
1789 mem_channels = (cgs_read_register(hwmgr->device, reg) &
1790 DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
1791 DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
1792 pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
1793 pp_table->MemoryChannelWidth =
1794 cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
1795 channel_number[mem_channels]);
1797 pp_table->LowestUclkReservedForUlv =
1798 (uint8_t)(data->lowest_uclk_reserved_for_ulv);
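/*
 * Select the dependency table that matches the requested display clock type
 * and populate its DisplayClockTable entries with the frequency (entry clock
 * divided by 100) and the VID derived from the VDDC lookup table.
 */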
1803 static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
1804 DSPCLK_e disp_clock)
1806 struct vega10_hwmgr *data =
1807 (struct vega10_hwmgr *)(hwmgr->backend);
1808 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1809 struct phm_ppt_v2_information *table_info =
1810 (struct phm_ppt_v2_information *)
1812 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
1814 uint16_t clk = 0, vddc = 0;
1817 switch (disp_clock) {
1818 case DSPCLK_DCEFCLK:
1819 dep_table = table_info->vdd_dep_on_dcefclk;
1821 case DSPCLK_DISPCLK:
1822 dep_table = table_info->vdd_dep_on_dispclk;
1825 dep_table = table_info->vdd_dep_on_pixclk;
1828 dep_table = table_info->vdd_dep_on_phyclk;
1834 PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
1835 "Number Of Entries Exceeded maximum!",
1838 for (i = 0; i < dep_table->count; i++) {
1839 clk = (uint16_t)(dep_table->entries[i].clk / 100);
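/* Dependency-table clocks are presumably stored in 10 kHz units, so
 * dividing by 100 yields the MHz value written to the SMU table.
 */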
1840 vddc = table_info->vddc_lookup_table->
1841 entries[dep_table->entries[i].vddInd].us_vdd;
1842 vid = (uint8_t)convert_to_vid(vddc);
1843 pp_table->DisplayClockTable[disp_clock][i].Freq =
1845 pp_table->DisplayClockTable[disp_clock][i].Vid =
1849 while (i < NUM_DSPCLK_LEVELS) {
1850 pp_table->DisplayClockTable[disp_clock][i].Freq =
1852 pp_table->DisplayClockTable[disp_clock][i].Vid =
1860 static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1864 for (i = 0; i < DSPCLK_COUNT; i++) {
1865 PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1866 "Failed to populate Clock in DisplayClockTable!",
1873 static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
1874 uint32_t eclock, uint8_t *current_eclk_did,
1875 uint8_t *current_soc_vol)
1877 struct phm_ppt_v2_information *table_info =
1878 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1879 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1880 table_info->mm_dep_table;
1881 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1884 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1885 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1887 "Failed to get ECLK clock settings from VBIOS!",
1890 *current_eclk_did = (uint8_t)dividers.ulDid;
1892 for (i = 0; i < dep_table->count; i++) {
1893 if (dep_table->entries[i].eclk == eclock)
1894 *current_soc_vol = dep_table->entries[i].vddcInd;
1900 static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
1902 struct vega10_hwmgr *data =
1903 (struct vega10_hwmgr *)(hwmgr->backend);
1904 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1905 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
1906 int result = -EINVAL;
1909 for (i = 0; i < dpm_table->count; i++) {
1910 result = vega10_populate_single_eclock_level(hwmgr,
1911 dpm_table->dpm_levels[i].value,
1912 &(pp_table->EclkDid[i]),
1913 &(pp_table->VceDpmVoltageIndex[i]));
1919 while (i < NUM_VCE_DPM_LEVELS) {
1920 result = vega10_populate_single_eclock_level(hwmgr,
1921 dpm_table->dpm_levels[j].value,
1922 &(pp_table->EclkDid[i]),
1923 &(pp_table->VceDpmVoltageIndex[i]));
1932 static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
1933 uint32_t vclock, uint8_t *current_vclk_did)
1935 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1937 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1938 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1940 "Failed to get VCLK clock settings from VBIOS!",
1943 *current_vclk_did = (uint8_t)dividers.ulDid;
1948 static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
1949 uint32_t dclock, uint8_t *current_dclk_did)
1951 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1953 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1954 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1956 "Failed to get DCLK clock settings from VBIOS!",
1959 *current_dclk_did = (uint8_t)dividers.ulDid;
1964 static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
1966 struct vega10_hwmgr *data =
1967 (struct vega10_hwmgr *)(hwmgr->backend);
1968 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1969 struct vega10_single_dpm_table *vclk_dpm_table =
1970 &(data->dpm_table.vclk_table);
1971 struct vega10_single_dpm_table *dclk_dpm_table =
1972 &(data->dpm_table.dclk_table);
1973 struct phm_ppt_v2_information *table_info =
1974 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1975 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1976 table_info->mm_dep_table;
1977 int result = -EINVAL;
1980 for (i = 0; i < vclk_dpm_table->count; i++) {
1981 result = vega10_populate_single_vclock_level(hwmgr,
1982 vclk_dpm_table->dpm_levels[i].value,
1983 &(pp_table->VclkDid[i]));
1989 while (i < NUM_UVD_DPM_LEVELS) {
1990 result = vega10_populate_single_vclock_level(hwmgr,
1991 vclk_dpm_table->dpm_levels[j].value,
1992 &(pp_table->VclkDid[i]));
1998 for (i = 0; i < dclk_dpm_table->count; i++) {
1999 result = vega10_populate_single_dclock_level(hwmgr,
2000 dclk_dpm_table->dpm_levels[i].value,
2001 &(pp_table->DclkDid[i]));
2007 while (i < NUM_UVD_DPM_LEVELS) {
2008 result = vega10_populate_single_dclock_level(hwmgr,
2009 dclk_dpm_table->dpm_levels[j].value,
2010 &(pp_table->DclkDid[i]));
2016 for (i = 0; i < dep_table->count; i++) {
2017 if (dep_table->entries[i].vclk ==
2018 vclk_dpm_table->dpm_levels[i].value &&
2019 dep_table->entries[i].dclk ==
2020 dclk_dpm_table->dpm_levels[i].value)
2021 pp_table->UvdDpmVoltageIndex[i] =
2022 dep_table->entries[i].vddcInd;
2028 while (i < NUM_UVD_DPM_LEVELS) {
2029 pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
2036 static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2038 struct vega10_hwmgr *data =
2039 (struct vega10_hwmgr *)(hwmgr->backend);
2040 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2041 struct phm_ppt_v2_information *table_info =
2042 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2043 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2044 table_info->vdd_dep_on_sclk;
2047 for (i = 0; i < dep_table->count; i++) {
2048 pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
2049 pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2050 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
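/* cks_voffset * SCALE2 / SCALE1 equals voffset / 6.25, i.e. the offset is
 * presumably given in mV and converted here to 6.25 mV VID steps
 * (e.g. a 25 mV offset becomes a CksVidOffset of 4).
 */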
2056 static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2058 struct vega10_hwmgr *data =
2059 (struct vega10_hwmgr *)(hwmgr->backend);
2060 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2061 struct phm_ppt_v2_information *table_info =
2062 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2063 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2064 table_info->vdd_dep_on_sclk;
2065 struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
2069 pp_table->MinVoltageVid = (uint8_t)0xff;
2070 pp_table->MaxVoltageVid = (uint8_t)0;
2072 if (data->smu_features[GNLD_AVFS].supported) {
2073 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
2075 pp_table->MinVoltageVid = (uint8_t)
2076 convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
2077 pp_table->MaxVoltageVid = (uint8_t)
2078 convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
2080 pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
2081 pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
2082 pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
2083 pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2084 pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
2085 pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaPlatformSigma);
2086 pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
2088 pp_table->BtcGbVdroopTableCksOff.a0 =
2089 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
2090 pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
2091 pp_table->BtcGbVdroopTableCksOff.a1 =
2092 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
2093 pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
2094 pp_table->BtcGbVdroopTableCksOff.a2 =
2095 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
2096 pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
2098 pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
2099 pp_table->BtcGbVdroopTableCksOn.a0 =
2100 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
2101 pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
2102 pp_table->BtcGbVdroopTableCksOn.a1 =
2103 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
2104 pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
2105 pp_table->BtcGbVdroopTableCksOn.a2 =
2106 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
2107 pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
2109 pp_table->AvfsGbCksOn.m1 =
2110 cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
2111 pp_table->AvfsGbCksOn.m2 =
2112 cpu_to_le32(avfs_params.ulGbFuseTableCksonM2);
2113 pp_table->AvfsGbCksOn.b =
2114 cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
2115 pp_table->AvfsGbCksOn.m1_shift = 24;
2116 pp_table->AvfsGbCksOn.m2_shift = 12;
2117 pp_table->AvfsGbCksOn.b_shift = 0;
2119 pp_table->OverrideAvfsGbCksOn =
2120 avfs_params.ucEnableGbFuseTableCkson;
2121 pp_table->AvfsGbCksOff.m1 =
2122 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
2123 pp_table->AvfsGbCksOff.m2 =
2124 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM2);
2125 pp_table->AvfsGbCksOff.b =
2126 cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
2127 pp_table->AvfsGbCksOff.m1_shift = 24;
2128 pp_table->AvfsGbCksOff.m2_shift = 12;
2129 pp_table->AvfsGbCksOff.b_shift = 0;
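/* The *_shift fields presumably tell the SMU how many fractional bits each
 * fixed-point coefficient carries, e.g. m1 is Q24 and m2 is Q12 for the
 * AVFS gain/offset curves populated above.
 */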
2131 for (i = 0; i < dep_table->count; i++) {
2132 if (dep_table->entries[i].sclk_offset == 0)
2133 pp_table->StaticVoltageOffsetVid[i] = 248;
2135 pp_table->StaticVoltageOffsetVid[i] =
2136 (uint8_t)(dep_table->entries[i].sclk_offset *
2137 VOLTAGE_VID_OFFSET_SCALE2 /
2138 VOLTAGE_VID_OFFSET_SCALE1);
2141 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2142 data->disp_clk_quad_eqn_a) &&
2143 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2144 data->disp_clk_quad_eqn_b)) {
2145 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2146 (int32_t)data->disp_clk_quad_eqn_a;
2147 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2148 (int32_t)data->disp_clk_quad_eqn_b;
2149 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2150 (int32_t)data->disp_clk_quad_eqn_c;
2152 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2153 (int32_t)avfs_params.ulDispclk2GfxclkM1;
2154 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2155 (int32_t)avfs_params.ulDispclk2GfxclkM2;
2156 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2157 (int32_t)avfs_params.ulDispclk2GfxclkB;
2160 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
2161 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
2162 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
2164 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2165 data->dcef_clk_quad_eqn_a) &&
2166 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2167 data->dcef_clk_quad_eqn_b)) {
2168 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2169 (int32_t)data->dcef_clk_quad_eqn_a;
2170 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2171 (int32_t)data->dcef_clk_quad_eqn_b;
2172 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2173 (int32_t)data->dcef_clk_quad_eqn_c;
2175 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2176 (int32_t)avfs_params.ulDcefclk2GfxclkM1;
2177 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2178 (int32_t)avfs_params.ulDcefclk2GfxclkM2;
2179 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2180 (int32_t)avfs_params.ulDcefclk2GfxclkB;
2183 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
2184 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
2185 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
2187 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2188 data->pixel_clk_quad_eqn_a) &&
2189 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2190 data->pixel_clk_quad_eqn_b)) {
2191 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2192 (int32_t)data->pixel_clk_quad_eqn_a;
2193 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2194 (int32_t)data->pixel_clk_quad_eqn_b;
2195 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2196 (int32_t)data->pixel_clk_quad_eqn_c;
2198 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2199 (int32_t)avfs_params.ulPixelclk2GfxclkM1;
2200 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2201 (int32_t)avfs_params.ulPixelclk2GfxclkM2;
2202 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2203 (int32_t)avfs_params.ulPixelclk2GfxclkB;
2206 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
2207 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
2208 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
2209 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2210 data->phy_clk_quad_eqn_a) &&
2211 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2212 data->phy_clk_quad_eqn_b)) {
2213 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2214 (int32_t)data->phy_clk_quad_eqn_a;
2215 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2216 (int32_t)data->phy_clk_quad_eqn_b;
2217 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2218 (int32_t)data->phy_clk_quad_eqn_c;
2220 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2221 (int32_t)avfs_params.ulPhyclk2GfxclkM1;
2222 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2223 (int32_t)avfs_params.ulPhyclk2GfxclkM2;
2224 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2225 (int32_t)avfs_params.ulPhyclk2GfxclkB;
2228 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
2229 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
2230 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
2232 data->smu_features[GNLD_AVFS].supported = false;
2239 static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2241 struct vega10_hwmgr *data =
2242 (struct vega10_hwmgr *)(hwmgr->backend);
2243 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2244 struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2247 result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2249 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2250 PHM_PlatformCaps_RegulatorHot) &&
2251 (data->registry_data.regulator_hot_gpio_support)) {
2252 pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2253 pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2254 pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2255 pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
2257 pp_table->VR0HotGpio = 0;
2258 pp_table->VR0HotPolarity = 0;
2259 pp_table->VR1HotGpio = 0;
2260 pp_table->VR1HotPolarity = 0;
2263 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2264 PHM_PlatformCaps_AutomaticDCTransition) &&
2265 (data->registry_data.ac_dc_switch_gpio_support)) {
2266 pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2267 pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
2269 pp_table->AcDcGpio = 0;
2270 pp_table->AcDcPolarity = 0;
2277 static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2279 struct vega10_hwmgr *data =
2280 (struct vega10_hwmgr *)(hwmgr->backend);
2282 if (data->smu_features[GNLD_AVFS].supported) {
2284 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2286 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2287 "[avfs_control] Attempt to Enable AVFS feature Failed!",
2289 data->smu_features[GNLD_AVFS].enabled = true;
2291 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2293 data->smu_features[GNLD_AVFS].smu_feature_id),
2294 "[avfs_control] Attempt to Disable AVFS feature Failed!",
2296 data->smu_features[GNLD_AVFS].enabled = false;
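/* Read the chip serial number from the SMU, look up a per-die AVFS fuse
 * override keyed on it, and upload the override table to the SMC if one
 * exists.
 */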
2303 static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
2307 uint64_t serial_number = 0;
2308 uint32_t top32, bottom32;
2309 struct phm_fuses_default fuse;
2311 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2312 AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
2314 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumTop32);
2315 vega10_read_arg_from_smc(hwmgr->smumgr, &top32);
2317 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumBottom32);
2318 vega10_read_arg_from_smc(hwmgr->smumgr, &bottom32);
2320 serial_number = ((uint64_t)bottom32 << 32) | top32;
2322 if (pp_override_get_default_fuse_value(serial_number, vega10_fuses_default, &fuse) == 0) {
2323 avfs_fuse_table->VFT0_b = fuse.VFT0_b;
2324 avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
2325 avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
2326 avfs_fuse_table->VFT1_b = fuse.VFT1_b;
2327 avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
2328 avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
2329 avfs_fuse_table->VFT2_b = fuse.VFT2_b;
2330 avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
2331 avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
2332 result = vega10_copy_table_to_smc(hwmgr->smumgr,
2333 (uint8_t *)avfs_fuse_table, AVFSFUSETABLE);
2334 PP_ASSERT_WITH_CODE(!result,
2335 "Failed to upload FuseOVerride!",
2342 static int vega10_save_default_power_profile(struct pp_hwmgr *hwmgr)
2344 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2345 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
2348 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
2349 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
2351 /* Optimize compute power profile: Use only highest
2352 * 2 power levels (if more than 2 are available)
2354 if (dpm_table->count > 2)
2355 min_level = dpm_table->count - 2;
2356 else if (dpm_table->count == 2)
2361 hwmgr->default_compute_power_profile.min_sclk =
2362 dpm_table->dpm_levels[min_level].value;
2364 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
2365 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
2371 * Initializes the SMC table and uploads it
2373 * @param hwmgr the address of the powerplay hardware manager.
2377 static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2380 struct vega10_hwmgr *data =
2381 (struct vega10_hwmgr *)(hwmgr->backend);
2382 struct phm_ppt_v2_information *table_info =
2383 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2384 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2385 struct pp_atomfwctrl_voltage_table voltage_table;
2386 struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
2388 result = vega10_setup_default_dpm_tables(hwmgr);
2389 PP_ASSERT_WITH_CODE(!result,
2390 "Failed to setup default DPM tables!",
2393 pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
2394 VOLTAGE_OBJ_SVID2, &voltage_table);
2395 pp_table->MaxVidStep = voltage_table.max_vid_step;
2397 pp_table->GfxDpmVoltageMode =
2398 (uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
2399 pp_table->SocDpmVoltageMode =
2400 (uint8_t)(table_info->uc_soc_dpm_voltage_mode);
2401 pp_table->UclkDpmVoltageMode =
2402 (uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
2403 pp_table->UvdDpmVoltageMode =
2404 (uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
2405 pp_table->VceDpmVoltageMode =
2406 (uint8_t)(table_info->uc_vce_dpm_voltage_mode);
2407 pp_table->Mp0DpmVoltageMode =
2408 (uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
2410 pp_table->DisplayDpmVoltageMode =
2411 (uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
2413 if (data->registry_data.ulv_support &&
2414 table_info->us_ulv_voltage_offset) {
2415 result = vega10_populate_ulv_state(hwmgr);
2416 PP_ASSERT_WITH_CODE(!result,
2417 "Failed to initialize ULV state!",
2421 result = vega10_populate_smc_link_levels(hwmgr);
2422 PP_ASSERT_WITH_CODE(!result,
2423 "Failed to initialize Link Level!",
2426 result = vega10_populate_all_graphic_levels(hwmgr);
2427 PP_ASSERT_WITH_CODE(!result,
2428 "Failed to initialize Graphics Level!",
2431 result = vega10_populate_all_memory_levels(hwmgr);
2432 PP_ASSERT_WITH_CODE(!result,
2433 "Failed to initialize Memory Level!",
2436 result = vega10_populate_all_display_clock_levels(hwmgr);
2437 PP_ASSERT_WITH_CODE(!result,
2438 "Failed to initialize Display Level!",
2441 result = vega10_populate_smc_vce_levels(hwmgr);
2442 PP_ASSERT_WITH_CODE(!result,
2443 "Failed to initialize VCE Level!",
2446 result = vega10_populate_smc_uvd_levels(hwmgr);
2447 PP_ASSERT_WITH_CODE(!result,
2448 "Failed to initialize UVD Level!",
2451 if (data->registry_data.clock_stretcher_support) {
2452 result = vega10_populate_clock_stretcher_table(hwmgr);
2453 PP_ASSERT_WITH_CODE(!result,
2454 "Failed to populate Clock Stretcher Table!",
2458 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
2460 data->vbios_boot_state.vddc = boot_up_values.usVddc;
2461 data->vbios_boot_state.vddci = boot_up_values.usVddci;
2462 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
2463 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
2464 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
2465 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
2466 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
2467 if (0 != boot_up_values.usVddc) {
2468 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2469 PPSMC_MSG_SetFloorSocVoltage,
2470 (boot_up_values.usVddc * 4));
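/* usVddc * 4 above presumably converts mV to the 0.25 mV (VOLTAGE_SCALE)
 * units the SetFloorSocVoltage message expects.
 */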
2471 data->vbios_boot_state.bsoc_vddc_lock = true;
2473 data->vbios_boot_state.bsoc_vddc_lock = false;
2475 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2476 PPSMC_MSG_SetMinDeepSleepDcefclk,
2477 (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
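/* dcef_clock is presumably in 10 kHz units; dividing by 100 gives the MHz
 * value used for the minimum deep-sleep DCEFCLK message.
 */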
2480 result = vega10_populate_avfs_parameters(hwmgr);
2481 PP_ASSERT_WITH_CODE(!result,
2482 "Failed to initialize AVFS Parameters!",
2485 result = vega10_populate_gpio_parameters(hwmgr);
2486 PP_ASSERT_WITH_CODE(!result,
2487 "Failed to initialize GPIO Parameters!",
2490 pp_table->GfxclkAverageAlpha = (uint8_t)
2491 (data->gfxclk_average_alpha);
2492 pp_table->SocclkAverageAlpha = (uint8_t)
2493 (data->socclk_average_alpha);
2494 pp_table->UclkAverageAlpha = (uint8_t)
2495 (data->uclk_average_alpha);
2496 pp_table->GfxActivityAverageAlpha = (uint8_t)
2497 (data->gfx_activity_average_alpha);
2499 vega10_populate_and_upload_avfs_fuse_override(hwmgr);
2501 result = vega10_copy_table_to_smc(hwmgr->smumgr,
2502 (uint8_t *)pp_table, PPTABLE);
2503 PP_ASSERT_WITH_CODE(!result,
2504 "Failed to upload PPtable!", return result);
2506 result = vega10_avfs_enable(hwmgr, true);
2507 PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
2510 vega10_save_default_power_profile(hwmgr);
2515 static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2517 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2519 if (data->smu_features[GNLD_THERMAL].supported) {
2520 if (data->smu_features[GNLD_THERMAL].enabled)
2521 pr_info("THERMAL Feature Already enabled!");
2523 PP_ASSERT_WITH_CODE(
2524 !vega10_enable_smc_features(hwmgr->smumgr,
2526 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2527 "Enable THERMAL Feature Failed!",
2529 data->smu_features[GNLD_THERMAL].enabled = true;
2535 static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
2537 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2539 if (data->smu_features[GNLD_THERMAL].supported) {
2540 if (!data->smu_features[GNLD_THERMAL].enabled)
2541 pr_info("THERMAL Feature Already disabled!");
2543 PP_ASSERT_WITH_CODE(
2544 !vega10_enable_smc_features(hwmgr->smumgr,
2546 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2547 "disable THERMAL Feature Failed!",
2549 data->smu_features[GNLD_THERMAL].enabled = false;
2555 static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2557 struct vega10_hwmgr *data =
2558 (struct vega10_hwmgr *)(hwmgr->backend);
2560 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2561 PHM_PlatformCaps_RegulatorHot)) {
2562 if (data->smu_features[GNLD_VR0HOT].supported) {
2563 PP_ASSERT_WITH_CODE(
2564 !vega10_enable_smc_features(hwmgr->smumgr,
2566 data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2567 "Attempt to Enable VR0 Hot feature Failed!",
2569 data->smu_features[GNLD_VR0HOT].enabled = true;
2571 if (data->smu_features[GNLD_VR1HOT].supported) {
2572 PP_ASSERT_WITH_CODE(
2573 !vega10_enable_smc_features(hwmgr->smumgr,
2575 data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
2576 "Attempt to Enable VR0 Hot feature Failed!",
2578 data->smu_features[GNLD_VR1HOT].enabled = true;
2585 static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2587 struct vega10_hwmgr *data =
2588 (struct vega10_hwmgr *)(hwmgr->backend);
2590 if (data->registry_data.ulv_support) {
2591 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2592 true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2593 "Enable ULV Feature Failed!",
2595 data->smu_features[GNLD_ULV].enabled = true;
2601 static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
2603 struct vega10_hwmgr *data =
2604 (struct vega10_hwmgr *)(hwmgr->backend);
2606 if (data->registry_data.ulv_support) {
2607 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2608 false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2609 "disable ULV Feature Failed!",
2611 data->smu_features[GNLD_ULV].enabled = false;
2617 static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2619 struct vega10_hwmgr *data =
2620 (struct vega10_hwmgr *)(hwmgr->backend);
2622 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2623 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2624 true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2625 "Attempt to Enable DS_GFXCLK Feature Failed!",
2627 data->smu_features[GNLD_DS_GFXCLK].enabled = true;
2630 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2631 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2632 true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2633 "Attempt to Enable DS_SOCCLK Feature Failed!",
2635 data->smu_features[GNLD_DS_SOCCLK].enabled = true;
2638 if (data->smu_features[GNLD_DS_LCLK].supported) {
2639 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2640 true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2641 "Attempt to Enable DS_LCLK Feature Failed!",
2643 data->smu_features[GNLD_DS_LCLK].enabled = true;
2646 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2647 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2648 true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2649 "Attempt to Enable DS_DCEFCLK Feature Failed!",
2651 data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
2657 static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2659 struct vega10_hwmgr *data =
2660 (struct vega10_hwmgr *)(hwmgr->backend);
2662 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2663 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2664 false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2665 "Attempt to disable DS_GFXCLK Feature Failed!",
2667 data->smu_features[GNLD_DS_GFXCLK].enabled = false;
2670 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2671 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2672 false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2673 "Attempt to disable DS_ Feature Failed!",
2675 data->smu_features[GNLD_DS_SOCCLK].enabled = false;
2678 if (data->smu_features[GNLD_DS_LCLK].supported) {
2679 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2680 false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2681 "Attempt to disable DS_LCLK Feature Failed!",
2683 data->smu_features[GNLD_DS_LCLK].enabled = false;
2686 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2687 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2688 false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2689 "Attempt to disable DS_DCEFCLK Feature Failed!",
2691 data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
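/* Disable the LED-display feature first, then collect every currently
 * enabled DPM feature selected by the bitmap into one mask and disable
 * them with a single SMC request.
 */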
2697 static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2699 struct vega10_hwmgr *data =
2700 (struct vega10_hwmgr *)(hwmgr->backend);
2701 uint32_t i, feature_mask = 0;
2704 if (data->smu_features[GNLD_LED_DISPLAY].supported) {
2705 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2706 false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2707 "Attempt to disable LED DPM feature failed!", return -EINVAL);
2708 data->smu_features[GNLD_LED_DISPLAY].enabled = false;
2711 for (i = 0; i < GNLD_DPM_MAX; i++) {
2712 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2713 if (data->smu_features[i].supported) {
2714 if (data->smu_features[i].enabled) {
2715 feature_mask |= data->smu_features[i].
2717 data->smu_features[i].enabled = false;
2723 vega10_enable_smc_features(hwmgr->smumgr, false, feature_mask);
2729 * @brief Tell SMC to enable the supported DPMs.
2731 * @param hwmgr - the address of the powerplay hardware manager.
2732 * @param bitmap - bitmap of the features to be enabled.
2733 * @return 0 if at least one DPM is successfully enabled.
2735 static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2737 struct vega10_hwmgr *data =
2738 (struct vega10_hwmgr *)(hwmgr->backend);
2739 uint32_t i, feature_mask = 0;
2741 for (i = 0; i < GNLD_DPM_MAX; i++) {
2742 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2743 if (data->smu_features[i].supported) {
2744 if (!data->smu_features[i].enabled) {
2745 feature_mask |= data->smu_features[i].
2747 data->smu_features[i].enabled = true;
2753 if (vega10_enable_smc_features(hwmgr->smumgr,
2754 true, feature_mask)) {
2755 for (i = 0; i < GNLD_DPM_MAX; i++) {
2756 if (data->smu_features[i].smu_feature_bitmap &
2758 data->smu_features[i].enabled = false;
2762 if (data->smu_features[GNLD_LED_DISPLAY].supported) {
2763 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2764 true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2765 "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2766 data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2769 if (data->vbios_boot_state.bsoc_vddc_lock) {
2770 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2771 PPSMC_MSG_SetFloorSocVoltage, 0);
2772 data->vbios_boot_state.bsoc_vddc_lock = false;
2775 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2776 PHM_PlatformCaps_Falcon_QuickTransition)) {
2777 if (data->smu_features[GNLD_ACDC].supported) {
2778 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2779 true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
2780 "Attempt to Enable DS_GFXCLK Feature Failed!",
2782 data->smu_features[GNLD_ACDC].enabled = true;
2789 static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2791 struct vega10_hwmgr *data =
2792 (struct vega10_hwmgr *)(hwmgr->backend);
2793 int tmp_result, result = 0;
2795 tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2796 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2797 PP_ASSERT_WITH_CODE(!tmp_result,
2798 "Failed to configure telemetry!",
2801 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2802 PPSMC_MSG_NumOfDisplays, 0);
2804 tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1;
2805 PP_ASSERT_WITH_CODE(!tmp_result,
2806 "DPM is already running right , skipping re-enablement!",
2809 tmp_result = vega10_construct_voltage_tables(hwmgr);
2810 PP_ASSERT_WITH_CODE(!tmp_result,
2811 "Failed to contruct voltage tables!",
2812 result = tmp_result);
2814 tmp_result = vega10_init_smc_table(hwmgr);
2815 PP_ASSERT_WITH_CODE(!tmp_result,
2816 "Failed to initialize SMC table!",
2817 result = tmp_result);
2819 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2820 PHM_PlatformCaps_ThermalController)) {
2821 tmp_result = vega10_enable_thermal_protection(hwmgr);
2822 PP_ASSERT_WITH_CODE(!tmp_result,
2823 "Failed to enable thermal protection!",
2824 result = tmp_result);
2827 tmp_result = vega10_enable_vrhot_feature(hwmgr);
2828 PP_ASSERT_WITH_CODE(!tmp_result,
2829 "Failed to enable VR hot feature!",
2830 result = tmp_result);
2832 tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
2833 PP_ASSERT_WITH_CODE(!tmp_result,
2834 "Failed to enable deep sleep master switch!",
2835 result = tmp_result);
2837 tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
2838 PP_ASSERT_WITH_CODE(!tmp_result,
2839 "Failed to start DPM!", result = tmp_result);
2841 tmp_result = vega10_enable_power_containment(hwmgr);
2842 PP_ASSERT_WITH_CODE(!tmp_result,
2843 "Failed to enable power containment!",
2844 result = tmp_result);
2846 tmp_result = vega10_power_control_set_level(hwmgr);
2847 PP_ASSERT_WITH_CODE(!tmp_result,
2848 "Failed to power control set level!",
2849 result = tmp_result);
2851 tmp_result = vega10_enable_ulv(hwmgr);
2852 PP_ASSERT_WITH_CODE(!tmp_result,
2853 "Failed to enable ULV!",
2854 result = tmp_result);
2859 static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
2861 return sizeof(struct vega10_power_state);
2864 static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
2865 void *state, struct pp_power_state *power_state,
2866 void *pp_table, uint32_t classification_flag)
2868 ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
2869 struct vega10_power_state *vega10_power_state =
2870 cast_phw_vega10_power_state(&(power_state->hardware));
2871 struct vega10_performance_level *performance_level;
2872 ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
2873 ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
2874 (ATOM_Vega10_POWERPLAYTABLE *)pp_table;
2875 ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
2876 (ATOM_Vega10_SOCCLK_Dependency_Table *)
2877 (((unsigned long)powerplay_table) +
2878 le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
2879 ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
2880 (ATOM_Vega10_GFXCLK_Dependency_Table *)
2881 (((unsigned long)powerplay_table) +
2882 le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
2883 ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
2884 (ATOM_Vega10_MCLK_Dependency_Table *)
2885 (((unsigned long)powerplay_table) +
2886 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
2889 /* The following fields are not initialized here:
2890 * id orderedList allStatesList
2892 power_state->classification.ui_label =
2893 (le16_to_cpu(state_entry->usClassification) &
2894 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
2895 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
2896 power_state->classification.flags = classification_flag;
2897 /* NOTE: There is a classification2 flag in BIOS
2898 * that is not being used right now
2900 power_state->classification.temporary_state = false;
2901 power_state->classification.to_be_deleted = false;
2903 power_state->validation.disallowOnDC =
2904 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
2905 ATOM_Vega10_DISALLOW_ON_DC) != 0);
2907 power_state->display.disableFrameModulation = false;
2908 power_state->display.limitRefreshrate = false;
2909 power_state->display.enableVariBright =
2910 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
2911 ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);
2913 power_state->validation.supportedPowerLevels = 0;
2914 power_state->uvd_clocks.VCLK = 0;
2915 power_state->uvd_clocks.DCLK = 0;
2916 power_state->temperatures.min = 0;
2917 power_state->temperatures.max = 0;
2919 performance_level = &(vega10_power_state->performance_levels
2920 [vega10_power_state->performance_level_count++]);
2922 PP_ASSERT_WITH_CODE(
2923 (vega10_power_state->performance_level_count <
2924 NUM_GFXCLK_DPM_LEVELS),
2925 "Performance levels exceeds SMC limit!",
2928 PP_ASSERT_WITH_CODE(
2929 (vega10_power_state->performance_level_count <=
2930 hwmgr->platform_descriptor.
2931 hardwareActivityPerformanceLevels),
2932 "Performance levels exceeds Driver limit!",
2935 /* Performance levels are arranged from low to high. */
2936 performance_level->soc_clock = socclk_dep_table->entries
2937 [state_entry->ucSocClockIndexLow].ulClk;
2938 performance_level->gfx_clock = gfxclk_dep_table->entries
2939 [state_entry->ucGfxClockIndexLow].ulClk;
2940 performance_level->mem_clock = mclk_dep_table->entries
2941 [state_entry->ucMemClockIndexLow].ulMemClk;
2943 performance_level = &(vega10_power_state->performance_levels
2944 [vega10_power_state->performance_level_count++]);
2945 performance_level->soc_clock = socclk_dep_table->entries
2946 [state_entry->ucSocClockIndexHigh].ulClk;
2947 if (gfxclk_dep_table->ucRevId == 0) {
2948 performance_level->gfx_clock = gfxclk_dep_table->entries
2949 [state_entry->ucGfxClockIndexHigh].ulClk;
2950 } else if (gfxclk_dep_table->ucRevId == 1) {
2951 patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
2952 performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
2955 performance_level->mem_clock = mclk_dep_table->entries
2956 [state_entry->ucMemClockIndexHigh].ulMemClk;
2960 static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
2961 unsigned long entry_index, struct pp_power_state *state)
2964 struct vega10_power_state *ps;
2966 state->hardware.magic = PhwVega10_Magic;
2968 ps = cast_phw_vega10_power_state(&state->hardware);
2970 result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
2971 vega10_get_pp_table_entry_callback_func);
2974 * This is the earliest time we have all the dependency table
2975 * and the VBIOS boot state
2977 /* set DC compatible flag if this state supports DC */
2978 if (!state->validation.disallowOnDC)
2979 ps->dc_compatible = true;
2981 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
2982 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
2987 static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
2988 struct pp_hw_power_state *hw_ps)
2993 static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2994 struct pp_power_state *request_ps,
2995 const struct pp_power_state *current_ps)
2997 struct vega10_power_state *vega10_ps =
2998 cast_phw_vega10_power_state(&request_ps->hardware);
3001 struct PP_Clocks minimum_clocks = {0};
3002 bool disable_mclk_switching;
3003 bool disable_mclk_switching_for_frame_lock;
3004 bool disable_mclk_switching_for_vr;
3005 bool force_mclk_high;
3006 struct cgs_display_info info = {0};
3007 const struct phm_clock_and_voltage_limits *max_limits;
3009 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
3010 struct phm_ppt_v2_information *table_info =
3011 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3013 uint32_t stable_pstate_sclk_dpm_percentage;
3014 uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3017 data->battery_state = (PP_StateUILabel_Battery ==
3018 request_ps->classification.ui_label);
3020 if (vega10_ps->performance_level_count != 2)
3021 pr_info("VI should always have 2 performance levels");
3023 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
3024 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3025 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3027 /* Cap clock DPM tables at DC MAX if it is in DC. */
3028 if (PP_PowerSource_DC == hwmgr->power_source) {
3029 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3030 if (vega10_ps->performance_levels[i].mem_clock >
3032 vega10_ps->performance_levels[i].mem_clock =
3034 if (vega10_ps->performance_levels[i].gfx_clock >
3036 vega10_ps->performance_levels[i].gfx_clock =
3041 vega10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
3042 vega10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
3044 cgs_get_active_displays_info(hwmgr->device, &info);
3046 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
3047 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
3048 minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
3050 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3051 PHM_PlatformCaps_StablePState)) {
3052 PP_ASSERT_WITH_CODE(
3053 data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
3054 data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
3055 "percent sclk value must range from 1% to 100%, setting default value",
3056 stable_pstate_sclk_dpm_percentage = 75);
3058 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3059 stable_pstate_sclk = (max_limits->sclk *
3060 stable_pstate_sclk_dpm_percentage) / 100;
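/* Example: with the default 75% and a max_limits->sclk of 150000 (presumably
 * 10 kHz units), stable_pstate_sclk starts at 112500 and is then snapped
 * down to the nearest vdd_dep_on_sclk entry in the loop below.
 */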
3062 for (count = table_info->vdd_dep_on_sclk->count - 1;
3063 count >= 0; count--) {
3064 if (stable_pstate_sclk >=
3065 table_info->vdd_dep_on_sclk->entries[count].clk) {
3066 stable_pstate_sclk =
3067 table_info->vdd_dep_on_sclk->entries[count].clk;
3073 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3075 stable_pstate_mclk = max_limits->mclk;
3077 minimum_clocks.engineClock = stable_pstate_sclk;
3078 minimum_clocks.memoryClock = stable_pstate_mclk;
3081 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
3082 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
3084 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
3085 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
3087 vega10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
3089 if (hwmgr->gfx_arbiter.sclk_over_drive) {
3090 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
3091 hwmgr->platform_descriptor.overdriveLimit.engineClock),
3092 "Overdrive sclk exceeds limit",
3093 hwmgr->gfx_arbiter.sclk_over_drive =
3094 hwmgr->platform_descriptor.overdriveLimit.engineClock);
3096 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
3097 vega10_ps->performance_levels[1].gfx_clock =
3098 hwmgr->gfx_arbiter.sclk_over_drive;
3101 if (hwmgr->gfx_arbiter.mclk_over_drive) {
3102 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
3103 hwmgr->platform_descriptor.overdriveLimit.memoryClock),
3104 "Overdrive mclk exceeds limit",
3105 hwmgr->gfx_arbiter.mclk_over_drive =
3106 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
3108 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
3109 vega10_ps->performance_levels[1].mem_clock =
3110 hwmgr->gfx_arbiter.mclk_over_drive;
3113 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
3114 hwmgr->platform_descriptor.platformCaps,
3115 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3116 disable_mclk_switching_for_vr = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3117 PHM_PlatformCaps_DisableMclkSwitchForVR);
3118 force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3119 PHM_PlatformCaps_ForceMclkHigh);
3121 disable_mclk_switching = (info.display_count > 1) ||
3122 disable_mclk_switching_for_frame_lock ||
3123 disable_mclk_switching_for_vr ||
3126 sclk = vega10_ps->performance_levels[0].gfx_clock;
3127 mclk = vega10_ps->performance_levels[0].mem_clock;
3129 if (sclk < minimum_clocks.engineClock)
3130 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3131 max_limits->sclk : minimum_clocks.engineClock;
3133 if (mclk < minimum_clocks.memoryClock)
3134 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3135 max_limits->mclk : minimum_clocks.memoryClock;
3137 vega10_ps->performance_levels[0].gfx_clock = sclk;
3138 vega10_ps->performance_levels[0].mem_clock = mclk;
3140 if (vega10_ps->performance_levels[1].gfx_clock <
3141 vega10_ps->performance_levels[0].gfx_clock)
3142 vega10_ps->performance_levels[0].gfx_clock =
3143 vega10_ps->performance_levels[1].gfx_clock;
3145 if (disable_mclk_switching) {
3146 /* Set Mclk the max of level 0 and level 1 */
3147 if (mclk < vega10_ps->performance_levels[1].mem_clock)
3148 mclk = vega10_ps->performance_levels[1].mem_clock;
3150 /* Find the lowest MCLK frequency that is within
3151 * the tolerable latency defined in DAL
3154 for (i = 0; i < data->mclk_latency_table.count; i++) {
3155 if ((data->mclk_latency_table.entries[i].latency <= latency) &&
3156 (data->mclk_latency_table.entries[i].frequency >=
3157 vega10_ps->performance_levels[0].mem_clock) &&
3158 (data->mclk_latency_table.entries[i].frequency <=
3159 vega10_ps->performance_levels[1].mem_clock))
3160 mclk = data->mclk_latency_table.entries[i].frequency;
3162 vega10_ps->performance_levels[0].mem_clock = mclk;
3164 if (vega10_ps->performance_levels[1].mem_clock <
3165 vega10_ps->performance_levels[0].mem_clock)
3166 vega10_ps->performance_levels[0].mem_clock =
3167 vega10_ps->performance_levels[1].mem_clock;
3170 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3171 PHM_PlatformCaps_StablePState)) {
3172 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3173 vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
3174 vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
3181 static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3183 const struct phm_set_power_state_input *states =
3184 (const struct phm_set_power_state_input *)input;
3185 const struct vega10_power_state *vega10_ps =
3186 cast_const_phw_vega10_power_state(states->pnew_state);
3187 struct vega10_hwmgr *data =
3188 (struct vega10_hwmgr *)(hwmgr->backend);
3189 struct vega10_single_dpm_table *sclk_table =
3190 &(data->dpm_table.gfx_table);
3191 uint32_t sclk = vega10_ps->performance_levels
3192 [vega10_ps->performance_level_count - 1].gfx_clock;
3193 struct vega10_single_dpm_table *mclk_table =
3194 &(data->dpm_table.mem_table);
3195 uint32_t mclk = vega10_ps->performance_levels
3196 [vega10_ps->performance_level_count - 1].mem_clock;
3197 struct PP_Clocks min_clocks = {0};
3199 struct cgs_display_info info = {0};
3201 data->need_update_dpm_table = 0;
3203 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3204 PHM_PlatformCaps_ODNinACSupport) ||
3205 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3206 PHM_PlatformCaps_ODNinDCSupport)) {
3207 for (i = 0; i < sclk_table->count; i++) {
3208 if (sclk == sclk_table->dpm_levels[i].value)
3212 if (!(data->apply_overdrive_next_settings_mask &
3213 DPMTABLE_OD_UPDATE_SCLK) && i >= sclk_table->count) {
3214 /* Check SCLK in DAL's minimum clocks
3215 * in case DeepSleep divider update is required.
3217 if (data->display_timing.min_clock_in_sr !=
3218 min_clocks.engineClockInSR &&
3219 (min_clocks.engineClockInSR >=
3220 VEGA10_MINIMUM_ENGINE_CLOCK ||
3221 data->display_timing.min_clock_in_sr >=
3222 VEGA10_MINIMUM_ENGINE_CLOCK))
3223 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3226 cgs_get_active_displays_info(hwmgr->device, &info);
3228 if (data->display_timing.num_existing_displays !=
3230 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3232 for (i = 0; i < sclk_table->count; i++) {
3233 if (sclk == sclk_table->dpm_levels[i].value)
3237 if (i >= sclk_table->count)
3238 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3240 /* Check SCLK in DAL's minimum clocks
3241 * in case DeepSleep divider update is required.
3243 if (data->display_timing.min_clock_in_sr !=
3244 min_clocks.engineClockInSR &&
3245 (min_clocks.engineClockInSR >=
3246 VEGA10_MINIMUM_ENGINE_CLOCK ||
3247 data->display_timing.min_clock_in_sr >=
3248 VEGA10_MINIMUM_ENGINE_CLOCK))
3249 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3252 for (i = 0; i < mclk_table->count; i++) {
3253 if (mclk == mclk_table->dpm_levels[i].value)
3257 cgs_get_active_displays_info(hwmgr->device, &info);
3259 if (i >= mclk_table->count)
3260 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3262 if (data->display_timing.num_existing_displays !=
3263 info.display_count ||
3264 i >= mclk_table->count)
3265 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3270 static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3271 struct pp_hwmgr *hwmgr, const void *input)
3274 const struct phm_set_power_state_input *states =
3275 (const struct phm_set_power_state_input *)input;
3276 const struct vega10_power_state *vega10_ps =
3277 cast_const_phw_vega10_power_state(states->pnew_state);
3278 struct vega10_hwmgr *data =
3279 (struct vega10_hwmgr *)(hwmgr->backend);
3280 uint32_t sclk = vega10_ps->performance_levels
3281 [vega10_ps->performance_level_count - 1].gfx_clock;
3282 uint32_t mclk = vega10_ps->performance_levels
3283 [vega10_ps->performance_level_count - 1].mem_clock;
3284 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3285 struct vega10_dpm_table *golden_dpm_table =
3286 &data->golden_dpm_table;
3287 uint32_t dpm_count, clock_percent;
3290 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3291 PHM_PlatformCaps_ODNinACSupport) ||
3292 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3293 PHM_PlatformCaps_ODNinDCSupport)) {
3295 if (!data->need_update_dpm_table &&
3296 !data->apply_optimized_settings &&
3297 !data->apply_overdrive_next_settings_mask)
3300 if (data->apply_overdrive_next_settings_mask &
3301 DPMTABLE_OD_UPDATE_SCLK) {
3303 dpm_count < dpm_table->gfx_table.count;
3305 dpm_table->gfx_table.dpm_levels[dpm_count].enabled =
3306 data->odn_dpm_table.odn_core_clock_dpm_levels.
3307 performance_level_entries[dpm_count].enabled;
3308 dpm_table->gfx_table.dpm_levels[dpm_count].value =
3309 data->odn_dpm_table.odn_core_clock_dpm_levels.
3310 performance_level_entries[dpm_count].clock;
3314 if (data->apply_overdrive_next_settings_mask &
3315 DPMTABLE_OD_UPDATE_MCLK) {
3317 dpm_count < dpm_table->mem_table.count;
3319 dpm_table->mem_table.dpm_levels[dpm_count].enabled =
3320 data->odn_dpm_table.odn_memory_clock_dpm_levels.
3321 performance_level_entries[dpm_count].enabled;
3322 dpm_table->mem_table.dpm_levels[dpm_count].value =
3323 data->odn_dpm_table.odn_memory_clock_dpm_levels.
3324 performance_level_entries[dpm_count].clock;
3328 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) ||
3329 data->apply_optimized_settings ||
3330 (data->apply_overdrive_next_settings_mask &
3331 DPMTABLE_OD_UPDATE_SCLK)) {
3332 result = vega10_populate_all_graphic_levels(hwmgr);
3333 PP_ASSERT_WITH_CODE(!result,
3334 "Failed to populate SCLK during \
3335 PopulateNewDPMClocksStates Function!",
3339 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) ||
3340 (data->apply_overdrive_next_settings_mask &
3341 DPMTABLE_OD_UPDATE_MCLK)){
3342 result = vega10_populate_all_memory_levels(hwmgr);
3343 PP_ASSERT_WITH_CODE(!result,
3344 "Failed to populate MCLK during \
3345 PopulateNewDPMClocksStates Function!",
3349 if (!data->need_update_dpm_table &&
3350 !data->apply_optimized_settings)
3353 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK &&
3354 data->smu_features[GNLD_DPM_GFXCLK].supported) {
3356 gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
3358 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3359 PHM_PlatformCaps_OD6PlusinACSupport) ||
3360 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3361 PHM_PlatformCaps_OD6PlusinDCSupport)) {
3362 /* Need to do calculation based on the golden DPM table
3363 * as the Heatmap GPU Clock axis is also based on
3364 * the default values
3366 PP_ASSERT_WITH_CODE(
3367 golden_dpm_table->gfx_table.dpm_levels
3368 [golden_dpm_table->gfx_table.count - 1].value,
3372 dpm_count = dpm_table->gfx_table.count < 2 ?
3373 0 : dpm_table->gfx_table.count - 2;
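/* Scale the intermediate levels by the same percentage the top level was
 * overdriven: clock_percent is |sclk - golden_max| * 100 / golden_max, and
 * each level below is moved up or down by that fraction of its golden value.
 */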
3374 for (i = dpm_count; i > 1; i--) {
3375 if (sclk > golden_dpm_table->gfx_table.dpm_levels
3376 [golden_dpm_table->gfx_table.count - 1].value) {
3378 ((sclk - golden_dpm_table->gfx_table.dpm_levels
3379 [golden_dpm_table->gfx_table.count - 1].value) *
3381 golden_dpm_table->gfx_table.dpm_levels
3382 [golden_dpm_table->gfx_table.count - 1].value;
3384 dpm_table->gfx_table.dpm_levels[i].value =
3385 golden_dpm_table->gfx_table.dpm_levels[i].value +
3386 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3387 clock_percent) / 100;
3388 } else if (golden_dpm_table->
3389 gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value >
3392 ((golden_dpm_table->gfx_table.dpm_levels
3393 [golden_dpm_table->gfx_table.count - 1].value -
3395 golden_dpm_table->gfx_table.dpm_levels
3396 [golden_dpm_table->gfx_table.count-1].value;
3398 dpm_table->gfx_table.dpm_levels[i].value =
3399 golden_dpm_table->gfx_table.dpm_levels[i].value -
3400 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3401 clock_percent) / 100;
3403 dpm_table->gfx_table.dpm_levels[i].value =
3404 golden_dpm_table->gfx_table.dpm_levels[i].value;
3409 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK &&
3410 data->smu_features[GNLD_DPM_UCLK].supported) {
3412 mem_table.dpm_levels[dpm_table->mem_table.count - 1].
3415 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3416 PHM_PlatformCaps_OD6PlusinACSupport) ||
3417 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3418 PHM_PlatformCaps_OD6PlusinDCSupport)) {
3420 PP_ASSERT_WITH_CODE(
3421 golden_dpm_table->mem_table.dpm_levels
3422 [golden_dpm_table->mem_table.count - 1].value,
3426 dpm_count = dpm_table->mem_table.count < 2 ?
3427 0 : dpm_table->mem_table.count - 2;
3428 for (i = dpm_count; i > 1; i--) {
3429 if (mclk > golden_dpm_table->mem_table.dpm_levels
3430 [golden_dpm_table->mem_table.count-1].value) {
3431 clock_percent = ((mclk -
3432 golden_dpm_table->mem_table.dpm_levels
3433 [golden_dpm_table->mem_table.count-1].value) *
3435 golden_dpm_table->mem_table.dpm_levels
3436 [golden_dpm_table->mem_table.count-1].value;
3438 dpm_table->mem_table.dpm_levels[i].value =
3439 golden_dpm_table->mem_table.dpm_levels[i].value +
3440 (golden_dpm_table->mem_table.dpm_levels[i].value *
3441 clock_percent) / 100;
3442 } else if (golden_dpm_table->mem_table.dpm_levels
3443 [dpm_table->mem_table.count-1].value > mclk) {
3444 clock_percent = ((golden_dpm_table->mem_table.dpm_levels
3445 [golden_dpm_table->mem_table.count-1].value - mclk) *
3447 golden_dpm_table->mem_table.dpm_levels
3448 [golden_dpm_table->mem_table.count-1].value;
3450 dpm_table->mem_table.dpm_levels[i].value =
3451 golden_dpm_table->mem_table.dpm_levels[i].value -
3452 (golden_dpm_table->mem_table.dpm_levels[i].value *
3453 clock_percent) / 100;
3455 dpm_table->mem_table.dpm_levels[i].value =
3456 golden_dpm_table->mem_table.dpm_levels[i].value;
3461 if ((data->need_update_dpm_table &
3462 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) ||
3463 data->apply_optimized_settings) {
3464 result = vega10_populate_all_graphic_levels(hwmgr);
3465 PP_ASSERT_WITH_CODE(!result,
3466 "Failed to populate SCLK during \
3467 PopulateNewDPMClocksStates Function!",
3471 if (data->need_update_dpm_table &
3472 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3473 result = vega10_populate_all_memory_levels(hwmgr);
3474 PP_ASSERT_WITH_CODE(!result,
3475 "Failed to populate MCLK during \
3476 PopulateNewDPMClocksStates Function!",
3483 static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3484 struct vega10_single_dpm_table *dpm_table,
3485 uint32_t low_limit, uint32_t high_limit)
3489 for (i = 0; i < dpm_table->count; i++) {
3490 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3491 (dpm_table->dpm_levels[i].value > high_limit))
3492 dpm_table->dpm_levels[i].enabled = false;
3494 dpm_table->dpm_levels[i].enabled = true;
3499 static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
3500 struct vega10_single_dpm_table *dpm_table,
3501 uint32_t low_limit, uint32_t high_limit,
3502 uint32_t disable_dpm_mask)
3506 for (i = 0; i < dpm_table->count; i++) {
3507 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3508 (dpm_table->dpm_levels[i].value > high_limit))
3509 dpm_table->dpm_levels[i].enabled = false;
3510 else if (!((1 << i) & disable_dpm_mask))
3511 dpm_table->dpm_levels[i].enabled = false;
3513 dpm_table->dpm_levels[i].enabled = true;
3518 static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3519 const struct vega10_power_state *vega10_ps)
3521 struct vega10_hwmgr *data =
3522 (struct vega10_hwmgr *)(hwmgr->backend);
3523 uint32_t high_limit_count;
3525 PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3526 "power state did not have any performance level",
3529 high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3531 vega10_trim_single_dpm_states(hwmgr,
3532 &(data->dpm_table.soc_table),
3533 vega10_ps->performance_levels[0].soc_clock,
3534 vega10_ps->performance_levels[high_limit_count].soc_clock);
3536 vega10_trim_single_dpm_states_with_mask(hwmgr,
3537 &(data->dpm_table.gfx_table),
3538 vega10_ps->performance_levels[0].gfx_clock,
3539 vega10_ps->performance_levels[high_limit_count].gfx_clock,
3540 data->disable_dpm_mask);
3542 vega10_trim_single_dpm_states(hwmgr,
3543 &(data->dpm_table.mem_table),
3544 vega10_ps->performance_levels[0].mem_clock,
3545 vega10_ps->performance_levels[high_limit_count].mem_clock);
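/* Return the index of the lowest enabled DPM level; the companion helper
 * below scans from the top of the table for the highest enabled one.
 */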
3550 static uint32_t vega10_find_lowest_dpm_level(
3551 struct vega10_single_dpm_table *table)
3555 for (i = 0; i < table->count; i++) {
3556 if (table->dpm_levels[i].enabled)
3563 static uint32_t vega10_find_highest_dpm_level(
3564 struct vega10_single_dpm_table *table)
3568 if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3569 for (i = table->count; i > 0; i--) {
3570 if (table->dpm_levels[i - 1].enabled)
3574 pr_info("DPM Table Has Too Many Entries!");
3575 return MAX_REGULAR_DPM_NUMBER - 1;
3581 static void vega10_apply_dal_minimum_voltage_request(
3582 struct pp_hwmgr *hwmgr)
3587 static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3589 struct vega10_hwmgr *data =
3590 (struct vega10_hwmgr *)(hwmgr->backend);
3592 vega10_apply_dal_minimum_voltage_request(hwmgr);
3594 if (!data->registry_data.sclk_dpm_key_disabled) {
3595 if (data->smc_state_table.gfx_boot_level !=
3596 data->dpm_table.gfx_table.dpm_state.soft_min_level) {
3597 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3599 PPSMC_MSG_SetSoftMinGfxclkByIndex,
3600 data->smc_state_table.gfx_boot_level),
3601 "Failed to set soft min sclk index!",
3603 data->dpm_table.gfx_table.dpm_state.soft_min_level =
3604 data->smc_state_table.gfx_boot_level;
3608 if (!data->registry_data.mclk_dpm_key_disabled) {
3609 if (data->smc_state_table.mem_boot_level !=
3610 data->dpm_table.mem_table.dpm_state.soft_min_level) {
3611 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3613 PPSMC_MSG_SetSoftMinUclkByIndex,
3614 data->smc_state_table.mem_boot_level),
3615 "Failed to set soft min mclk index!",
3618 data->dpm_table.mem_table.dpm_state.soft_min_level =
3619 data->smc_state_table.mem_boot_level;
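/*
 * Counterpart of vega10_upload_dpm_bootup_level(): programs the soft-maximum
 * GFX and UCLK level indices when they differ from the cached values.
 */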
3626 static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
3628 struct vega10_hwmgr *data =
3629 (struct vega10_hwmgr *)(hwmgr->backend);
3631 vega10_apply_dal_minimum_voltage_request(hwmgr);
3633 if (!data->registry_data.sclk_dpm_key_disabled) {
3634 if (data->smc_state_table.gfx_max_level !=
3635 data->dpm_table.gfx_table.dpm_state.soft_max_level) {
3636 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3638 PPSMC_MSG_SetSoftMaxGfxclkByIndex,
3639 data->smc_state_table.gfx_max_level),
3640 "Failed to set soft max sclk index!",
3642 data->dpm_table.gfx_table.dpm_state.soft_max_level =
3643 data->smc_state_table.gfx_max_level;
3647 if (!data->registry_data.mclk_dpm_key_disabled) {
3648 if (data->smc_state_table.mem_max_level !=
3649 data->dpm_table.mem_table.dpm_state.soft_max_level) {
3650 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3652 PPSMC_MSG_SetSoftMaxUclkByIndex,
3653 data->smc_state_table.mem_max_level),
3654 "Failed to set soft max mclk index!",
3656 data->dpm_table.mem_table.dpm_state.soft_max_level =
3657 data->smc_state_table.mem_max_level;
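/*
 * Trim the DPM tables against the new power state, derive the boot/max level
 * indices from what is left enabled, upload them to the SMC and then mark the
 * whole boot-to-max range as enabled.
 */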
3664 static int vega10_generate_dpm_level_enable_mask(
3665 struct pp_hwmgr *hwmgr, const void *input)
3667 struct vega10_hwmgr *data =
3668 (struct vega10_hwmgr *)(hwmgr->backend);
3669 const struct phm_set_power_state_input *states =
3670 (const struct phm_set_power_state_input *)input;
3671 const struct vega10_power_state *vega10_ps =
3672 cast_const_phw_vega10_power_state(states->pnew_state);
3675 PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
3676 "Attempt to Trim DPM States Failed!",
3679 data->smc_state_table.gfx_boot_level =
3680 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3681 data->smc_state_table.gfx_max_level =
3682 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3683 data->smc_state_table.mem_boot_level =
3684 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3685 data->smc_state_table.mem_max_level =
3686 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3688 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3689 "Attempt to upload DPM Bootup Levels Failed!",
3691 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3692 "Attempt to upload DPM Max Levels Failed!",
3694 	for (i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
3695 data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
3698 	for (i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
3699 data->dpm_table.mem_table.dpm_levels[i].enabled = true;
3704 int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3706 struct vega10_hwmgr *data =
3707 (struct vega10_hwmgr *)(hwmgr->backend);
3709 if (data->smu_features[GNLD_DPM_VCE].supported) {
3710 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
3712 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
3713 "Attempt to Enable/Disable DPM VCE Failed!",
3715 data->smu_features[GNLD_DPM_VCE].enabled = enable;
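/*
 * Reprogram the low-SCLK interrupt threshold when the arbiter requested a new
 * value; the SMC message below also enables the SmcToHost interrupt.
 */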
3721 static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3723 struct vega10_hwmgr *data =
3724 (struct vega10_hwmgr *)(hwmgr->backend);
3726 uint32_t low_sclk_interrupt_threshold = 0;
3728 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3729 PHM_PlatformCaps_SclkThrottleLowNotification)
3730 && (hwmgr->gfx_arbiter.sclk_threshold !=
3731 data->low_sclk_interrupt_threshold)) {
3732 data->low_sclk_interrupt_threshold =
3733 hwmgr->gfx_arbiter.sclk_threshold;
3734 low_sclk_interrupt_threshold =
3735 data->low_sclk_interrupt_threshold;
3737 data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3738 cpu_to_le32(low_sclk_interrupt_threshold);
3740 /* This message will also enable SmcToHost Interrupt */
3741 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3742 PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3743 (uint32_t)low_sclk_interrupt_threshold);
3749 static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
3752 int tmp_result, result = 0;
3753 struct vega10_hwmgr *data =
3754 (struct vega10_hwmgr *)(hwmgr->backend);
3755 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
3757 tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3758 PP_ASSERT_WITH_CODE(!tmp_result,
3759 "Failed to find DPM states clocks in DPM table!",
3760 result = tmp_result);
3762 tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3763 PP_ASSERT_WITH_CODE(!tmp_result,
3764 "Failed to populate and upload SCLK MCLK DPM levels!",
3765 result = tmp_result);
3767 tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
3768 PP_ASSERT_WITH_CODE(!tmp_result,
3769 "Failed to generate DPM level enabled mask!",
3770 result = tmp_result);
3772 tmp_result = vega10_update_sclk_threshold(hwmgr);
3773 PP_ASSERT_WITH_CODE(!tmp_result,
3774 "Failed to update SCLK threshold!",
3775 result = tmp_result);
3777 result = vega10_copy_table_to_smc(hwmgr->smumgr,
3778 (uint8_t *)pp_table, PPTABLE);
3779 PP_ASSERT_WITH_CODE(!result,
3780 "Failed to upload PPtable!", return result);
3782 data->apply_optimized_settings = false;
3783 data->apply_overdrive_next_settings_mask = 0;
3788 static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3790 struct pp_power_state *ps;
3791 struct vega10_power_state *vega10_ps;
3796 ps = hwmgr->request_ps;
3801 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3804 return vega10_ps->performance_levels[0].gfx_clock;
3806 return vega10_ps->performance_levels
3807 [vega10_ps->performance_level_count - 1].gfx_clock;
3810 static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3812 struct pp_power_state *ps;
3813 struct vega10_power_state *vega10_ps;
3818 ps = hwmgr->request_ps;
3823 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3826 return vega10_ps->performance_levels[0].mem_clock;
3828 return vega10_ps->performance_levels
3829 [vega10_ps->performance_level_count-1].mem_clock;
3832 static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
3833 struct pp_gpu_power *query)
3837 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
3838 PPSMC_MSG_GetCurrPkgPwr),
3839 "Failed to get current package power!",
3842 vega10_read_arg_from_smc(hwmgr->smumgr, &value);
3843 	/* power is reported as an integer watt value; shift into 24.8 fixed point */
3844 	query->average_gpu_power = value << 8;
3849 static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3850 void *value, int *size)
3852 uint32_t sclk_idx, mclk_idx, activity_percent = 0;
3853 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
3854 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3858 case AMDGPU_PP_SENSOR_GFX_SCLK:
3859 ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentGfxclkIndex);
3861 vega10_read_arg_from_smc(hwmgr->smumgr, &sclk_idx);
3862 *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
3866 case AMDGPU_PP_SENSOR_GFX_MCLK:
3867 ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentUclkIndex);
3869 vega10_read_arg_from_smc(hwmgr->smumgr, &mclk_idx);
3870 *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
3874 case AMDGPU_PP_SENSOR_GPU_LOAD:
3875 ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetAverageGfxActivity, 0);
3877 vega10_read_arg_from_smc(hwmgr->smumgr, &activity_percent);
3878 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3882 case AMDGPU_PP_SENSOR_GPU_TEMP:
3883 *((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
3886 case AMDGPU_PP_SENSOR_UVD_POWER:
3887 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3890 case AMDGPU_PP_SENSOR_VCE_POWER:
3891 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3894 case AMDGPU_PP_SENSOR_GPU_POWER:
3895 if (*size < sizeof(struct pp_gpu_power))
3898 *size = sizeof(struct pp_gpu_power);
3899 ret = vega10_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
3909 static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
3912 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3913 PPSMC_MSG_SetUclkFastSwitch,
3917 int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3918 struct pp_display_clock_request *clock_req)
3921 enum amd_pp_clock_type clk_type = clock_req->clock_type;
3922 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
3923 DSPCLK_e clk_select = 0;
3924 uint32_t clk_request = 0;
3927 case amd_pp_dcef_clock:
3928 clk_select = DSPCLK_DCEFCLK;
3930 case amd_pp_disp_clock:
3931 clk_select = DSPCLK_DISPCLK;
3933 case amd_pp_pixel_clock:
3934 clk_select = DSPCLK_PIXCLK;
3936 case amd_pp_phy_clock:
3937 clk_select = DSPCLK_PHYCLK;
3940 		pr_info("[DisplayClockVoltageRequest] Invalid Clock Type!");
3946 clk_request = (clk_freq << 16) | clk_select;
3947 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3948 PPSMC_MSG_RequestDisplayClockByFreq,
3955 static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
3956 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
3962 if (mclk_table == NULL || mclk_table->count == 0)
3965 count = (uint8_t)(mclk_table->count);
3967 	for (i = 0; i < count; i++) {
3968 		if (mclk_table->entries[i].clk >= frequency)
3975 static int vega10_notify_smc_display_config_after_ps_adjustment(
3976 struct pp_hwmgr *hwmgr)
3978 struct vega10_hwmgr *data =
3979 (struct vega10_hwmgr *)(hwmgr->backend);
3980 struct vega10_single_dpm_table *dpm_table =
3981 &data->dpm_table.dcef_table;
3982 struct phm_ppt_v2_information *table_info =
3983 (struct phm_ppt_v2_information *)hwmgr->pptable;
3984 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
3986 uint32_t num_active_disps = 0;
3987 struct cgs_display_info info = {0};
3988 struct PP_Clocks min_clocks = {0};
3990 struct pp_display_clock_request clock_req;
3992 info.mode_info = NULL;
3994 cgs_get_active_displays_info(hwmgr->device, &info);
3996 num_active_disps = info.display_count;
3998 if (num_active_disps > 1)
3999 		vega10_notify_smc_display_change(hwmgr, false);
4000 	else
4001 		vega10_notify_smc_display_change(hwmgr, true);
4003 min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
4004 min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
4005 min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
4007 for (i = 0; i < dpm_table->count; i++) {
4008 if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
4012 if (i < dpm_table->count) {
4013 clock_req.clock_type = amd_pp_dcef_clock;
4014 clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
4015 if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
4016 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
4017 hwmgr->smumgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
4018 					min_clocks.dcefClockInSR / 100),
4019 					"Attempt to set divider for DCEFCLK Failed!",);
4020 		} else {
4021 			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
4022 		}
4023 	} else {
4024 		pr_info("Cannot find requested DCEFCLK!");
4027 if (min_clocks.memoryClock != 0) {
4028 idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
4029 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
4030 		data->dpm_table.mem_table.dpm_state.soft_min_level = idx;
4036 static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
4038 struct vega10_hwmgr *data =
4039 (struct vega10_hwmgr *)(hwmgr->backend);
4041 data->smc_state_table.gfx_boot_level =
4042 data->smc_state_table.gfx_max_level =
4043 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
4044 data->smc_state_table.mem_boot_level =
4045 data->smc_state_table.mem_max_level =
4046 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
4048 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4049 "Failed to upload boot level to highest!",
4052 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4053 "Failed to upload dpm max level to highest!",
4059 static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
4061 struct vega10_hwmgr *data =
4062 (struct vega10_hwmgr *)(hwmgr->backend);
4064 data->smc_state_table.gfx_boot_level =
4065 data->smc_state_table.gfx_max_level =
4066 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4067 data->smc_state_table.mem_boot_level =
4068 data->smc_state_table.mem_max_level =
4069 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4071 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4072 			"Failed to upload boot level to lowest!",
4075 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4076 			"Failed to upload dpm max level to lowest!",
4083 static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
4085 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4087 data->smc_state_table.gfx_boot_level =
4088 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4089 data->smc_state_table.gfx_max_level =
4090 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
4091 data->smc_state_table.mem_boot_level =
4092 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4093 data->smc_state_table.mem_max_level =
4094 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
4096 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4097 "Failed to upload DPM Bootup Levels!",
4100 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4101 "Failed to upload DPM Max Levels!",
4106 static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
4107 enum amd_dpm_forced_level level)
4112 case AMD_DPM_FORCED_LEVEL_HIGH:
4113 ret = vega10_force_dpm_highest(hwmgr);
4117 case AMD_DPM_FORCED_LEVEL_LOW:
4118 ret = vega10_force_dpm_lowest(hwmgr);
4122 case AMD_DPM_FORCED_LEVEL_AUTO:
4123 ret = vega10_unforce_dpm_levels(hwmgr);
4131 hwmgr->dpm_level = level;
4136 static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4141 case AMD_FAN_CTRL_NONE:
4142 result = vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4144 case AMD_FAN_CTRL_MANUAL:
4145 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4146 PHM_PlatformCaps_MicrocodeFanControl))
4147 result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
4149 case AMD_FAN_CTRL_AUTO:
4150 		result = vega10_fan_ctrl_set_static_mode(hwmgr, mode);
4151 		if (!result)
4152 			result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
4160 static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4162 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4164 	if (!data->smu_features[GNLD_FAN_CONTROL].enabled)
4165 return AMD_FAN_CTRL_MANUAL;
4167 return AMD_FAN_CTRL_AUTO;
4170 static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
4171 struct amd_pp_simple_clock_info *info)
4173 struct phm_ppt_v2_information *table_info =
4174 (struct phm_ppt_v2_information *)hwmgr->pptable;
4175 struct phm_clock_and_voltage_limits *max_limits =
4176 &table_info->max_clock_voltage_on_ac;
4178 info->engine_max_clock = max_limits->sclk;
4179 info->memory_max_clock = max_limits->mclk;
4184 static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
4185 struct pp_clock_levels_with_latency *clocks)
4187 struct phm_ppt_v2_information *table_info =
4188 (struct phm_ppt_v2_information *)hwmgr->pptable;
4189 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4190 table_info->vdd_dep_on_sclk;
4193 for (i = 0; i < dep_table->count; i++) {
4194 if (dep_table->entries[i].clk) {
4195 clocks->data[clocks->num_levels].clocks_in_khz =
4196 dep_table->entries[i].clk;
4197 clocks->num_levels++;
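/*
 * Bucket a memory clock into the latency constants defined at the top of the
 * file: below MEM_FREQ_LOW_LATENCY the value is reported as MEM_LATENCY_ERR,
 * between the two thresholds as MEM_LATENCY_HIGH, and at or above
 * MEM_FREQ_HIGH_LATENCY as MEM_LATENCY_LOW. For example, assuming the
 * dependency table stores clocks in 10 kHz units, a 500 MHz entry (50000)
 * maps to MEM_LATENCY_HIGH and a 945 MHz entry (94500) to MEM_LATENCY_LOW.
 */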
4203 static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
4206 if (clock >= MEM_FREQ_LOW_LATENCY &&
4207 clock < MEM_FREQ_HIGH_LATENCY)
4208 return MEM_LATENCY_HIGH;
4209 else if (clock >= MEM_FREQ_HIGH_LATENCY)
4210 return MEM_LATENCY_LOW;
4212 return MEM_LATENCY_ERR;
4215 static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
4216 struct pp_clock_levels_with_latency *clocks)
4218 struct phm_ppt_v2_information *table_info =
4219 (struct phm_ppt_v2_information *)hwmgr->pptable;
4220 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4221 table_info->vdd_dep_on_mclk;
4222 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4225 clocks->num_levels = 0;
4226 data->mclk_latency_table.count = 0;
4228 for (i = 0; i < dep_table->count; i++) {
4229 if (dep_table->entries[i].clk) {
4230 clocks->data[clocks->num_levels].clocks_in_khz =
4231 data->mclk_latency_table.entries
4232 [data->mclk_latency_table.count].frequency =
4233 dep_table->entries[i].clk;
4234 clocks->data[clocks->num_levels].latency_in_us =
4235 data->mclk_latency_table.entries
4236 [data->mclk_latency_table.count].latency =
4237 vega10_get_mem_latency(hwmgr,
4238 dep_table->entries[i].clk);
4239 clocks->num_levels++;
4240 data->mclk_latency_table.count++;
4245 static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
4246 struct pp_clock_levels_with_latency *clocks)
4248 struct phm_ppt_v2_information *table_info =
4249 (struct phm_ppt_v2_information *)hwmgr->pptable;
4250 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4251 table_info->vdd_dep_on_dcefclk;
4254 for (i = 0; i < dep_table->count; i++) {
4255 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4256 clocks->data[i].latency_in_us = 0;
4257 clocks->num_levels++;
4261 static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
4262 struct pp_clock_levels_with_latency *clocks)
4264 struct phm_ppt_v2_information *table_info =
4265 (struct phm_ppt_v2_information *)hwmgr->pptable;
4266 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4267 table_info->vdd_dep_on_socclk;
4270 for (i = 0; i < dep_table->count; i++) {
4271 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4272 clocks->data[i].latency_in_us = 0;
4273 clocks->num_levels++;
4277 static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
4278 enum amd_pp_clock_type type,
4279 struct pp_clock_levels_with_latency *clocks)
4282 case amd_pp_sys_clock:
4283 vega10_get_sclks(hwmgr, clocks);
4285 case amd_pp_mem_clock:
4286 vega10_get_memclocks(hwmgr, clocks);
4288 case amd_pp_dcef_clock:
4289 vega10_get_dcefclocks(hwmgr, clocks);
4291 case amd_pp_soc_clock:
4292 vega10_get_socclocks(hwmgr, clocks);
4301 static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
4302 enum amd_pp_clock_type type,
4303 struct pp_clock_levels_with_voltage *clocks)
4305 struct phm_ppt_v2_information *table_info =
4306 (struct phm_ppt_v2_information *)hwmgr->pptable;
4307 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4311 case amd_pp_mem_clock:
4312 dep_table = table_info->vdd_dep_on_mclk;
4314 case amd_pp_dcef_clock:
4315 dep_table = table_info->vdd_dep_on_dcefclk;
4317 case amd_pp_disp_clock:
4318 dep_table = table_info->vdd_dep_on_dispclk;
4320 case amd_pp_pixel_clock:
4321 dep_table = table_info->vdd_dep_on_pixclk;
4323 case amd_pp_phy_clock:
4324 dep_table = table_info->vdd_dep_on_phyclk;
4330 for (i = 0; i < dep_table->count; i++) {
4331 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4332 clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
4333 entries[dep_table->entries[i].vddInd].us_vdd);
4334 clocks->num_levels++;
4337 if (i < dep_table->count)
4343 static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
4344 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
4346 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4347 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
4351 if (!data->registry_data.disable_water_mark) {
4352 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
4353 table->WatermarkRow[WM_DCEFCLK][i].MinClock =
4354 cpu_to_le16((uint16_t)
4355 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
4357 table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
4358 cpu_to_le16((uint16_t)
4359 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
4361 table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
4362 cpu_to_le16((uint16_t)
4363 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
4365 table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
4366 cpu_to_le16((uint16_t)
4367 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
4369 table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
4370 wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
4373 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
4374 table->WatermarkRow[WM_SOCCLK][i].MinClock =
4375 cpu_to_le16((uint16_t)
4376 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
4378 table->WatermarkRow[WM_SOCCLK][i].MaxClock =
4379 cpu_to_le16((uint16_t)
4380 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
4382 table->WatermarkRow[WM_SOCCLK][i].MinUclk =
4383 cpu_to_le16((uint16_t)
4384 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
4386 table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
4387 cpu_to_le16((uint16_t)
4388 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
4390 table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
4391 wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
4393 data->water_marks_bitmap = WaterMarksExist;
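/*
 * Only honoured in manual DPM mode: the lowest set bit of the user-supplied
 * mask becomes the new soft-minimum level and the highest set bit the new
 * soft-maximum. A mask of 0x6, for instance, pins the clock to levels 1-2.
 */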
4399 static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
4400 enum pp_clock_type type, uint32_t mask)
4402 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4405 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
4410 for (i = 0; i < 32; i++) {
4411 if (mask & (1 << i))
4414 data->smc_state_table.gfx_boot_level = i;
4416 for (i = 31; i >= 0; i--) {
4417 if (mask & (1 << i))
4420 data->smc_state_table.gfx_max_level = i;
4422 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4423 "Failed to upload boot level to lowest!",
4426 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4427 "Failed to upload dpm max level to highest!",
4432 for (i = 0; i < 32; i++) {
4433 if (mask & (1 << i))
4436 data->smc_state_table.mem_boot_level = i;
4438 for (i = 31; i >= 0; i--) {
4439 if (mask & (1 << i))
4442 data->smc_state_table.mem_max_level = i;
4444 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4445 "Failed to upload boot level to lowest!",
4448 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4449 "Failed to upload dpm max level to highest!",
4462 static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4463 enum pp_clock_type type, char *buf)
4465 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4466 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4467 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4468 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
4469 int i, now, size = 0;
4473 if (data->registry_data.sclk_dpm_key_disabled)
4476 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4477 PPSMC_MSG_GetCurrentGfxclkIndex),
4478 "Attempt to get current sclk index Failed!",
4480 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4482 "Attempt to read sclk index Failed!",
4485 for (i = 0; i < sclk_table->count; i++)
4486 		size += sprintf(buf + size, "%d: %uMHz %s\n",
4487 i, sclk_table->dpm_levels[i].value / 100,
4488 (i == now) ? "*" : "");
4491 if (data->registry_data.mclk_dpm_key_disabled)
4494 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4495 PPSMC_MSG_GetCurrentUclkIndex),
4496 "Attempt to get current mclk index Failed!",
4498 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4500 "Attempt to read mclk index Failed!",
4503 for (i = 0; i < mclk_table->count; i++)
4504 		size += sprintf(buf + size, "%d: %uMHz %s\n",
4505 i, mclk_table->dpm_levels[i].value / 100,
4506 (i == now) ? "*" : "");
4509 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4510 PPSMC_MSG_GetCurrentLinkIndex),
4511 			"Attempt to get current PCIe index Failed!",
4513 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4515 			"Attempt to read PCIe index Failed!",
4518 for (i = 0; i < pcie_table->count; i++)
4519 size += sprintf(buf + size, "%d: %s %s\n", i,
4520 				(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
4521 				(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
4522 				(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
4523 (i == now) ? "*" : "");
4531 static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4533 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4535 uint32_t num_turned_on_displays = 1;
4536 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
4537 struct cgs_display_info info = {0};
4539 if ((data->water_marks_bitmap & WaterMarksExist) &&
4540 !(data->water_marks_bitmap & WaterMarksLoaded)) {
4541 result = vega10_copy_table_to_smc(hwmgr->smumgr,
4542 (uint8_t *)wm_table, WMTABLE);
4543 	PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
4544 data->water_marks_bitmap |= WaterMarksLoaded;
4547 if (data->water_marks_bitmap & WaterMarksLoaded) {
4548 cgs_get_active_displays_info(hwmgr->device, &info);
4549 num_turned_on_displays = info.display_count;
4550 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4551 PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
4557 int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4559 struct vega10_hwmgr *data =
4560 (struct vega10_hwmgr *)(hwmgr->backend);
4562 if (data->smu_features[GNLD_DPM_UVD].supported) {
4563 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
4565 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
4566 "Attempt to Enable/Disable DPM UVD Failed!",
4568 data->smu_features[GNLD_DPM_UVD].enabled = enable;
4573 static int vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
4575 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4577 data->vce_power_gated = bgate;
4578 return vega10_enable_disable_vce_dpm(hwmgr, !bgate);
4581 static int vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
4583 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4585 data->uvd_power_gated = bgate;
4586 return vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
4589 static inline bool vega10_are_power_levels_equal(
4590 const struct vega10_performance_level *pl1,
4591 const struct vega10_performance_level *pl2)
4593 return ((pl1->soc_clock == pl2->soc_clock) &&
4594 (pl1->gfx_clock == pl2->gfx_clock) &&
4595 (pl1->mem_clock == pl2->mem_clock));
4598 static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
4599 const struct pp_hw_power_state *pstate1,
4600 const struct pp_hw_power_state *pstate2, bool *equal)
4602 const struct vega10_power_state *psa;
4603 const struct vega10_power_state *psb;
4606 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4609 psa = cast_const_phw_vega10_power_state(pstate1);
4610 psb = cast_const_phw_vega10_power_state(pstate2);
4611 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4612 if (psa->performance_level_count != psb->performance_level_count) {
4617 for (i = 0; i < psa->performance_level_count; i++) {
4618 if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4619 /* If we have found even one performance level pair that is different the states are different. */
4625 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4626 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4627 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4628 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4634 vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4636 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4637 bool is_update_required = false;
4638 struct cgs_display_info info = {0, 0, NULL};
4640 cgs_get_active_displays_info(hwmgr->device, &info);
4642 if (data->display_timing.num_existing_displays != info.display_count)
4643 is_update_required = true;
4645 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4646 if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
4647 is_update_required = true;
4650 return is_update_required;
4653 static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4655 int tmp_result, result = 0;
4657 tmp_result = (vega10_is_dpm_running(hwmgr)) ? 0 : -1;
4658 PP_ASSERT_WITH_CODE(tmp_result == 0,
4659 "DPM is not running right now, no need to disable DPM!",
4662 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4663 PHM_PlatformCaps_ThermalController))
4664 vega10_disable_thermal_protection(hwmgr);
4666 tmp_result = vega10_disable_power_containment(hwmgr);
4667 PP_ASSERT_WITH_CODE((tmp_result == 0),
4668 "Failed to disable power containment!", result = tmp_result);
4670 tmp_result = vega10_avfs_enable(hwmgr, false);
4671 PP_ASSERT_WITH_CODE((tmp_result == 0),
4672 "Failed to disable AVFS!", result = tmp_result);
4674 tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
4675 PP_ASSERT_WITH_CODE((tmp_result == 0),
4676 "Failed to stop DPM!", result = tmp_result);
4678 tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
4679 PP_ASSERT_WITH_CODE((tmp_result == 0),
4680 "Failed to disable deep sleep!", result = tmp_result);
4682 tmp_result = vega10_disable_ulv(hwmgr);
4683 PP_ASSERT_WITH_CODE((tmp_result == 0),
4684 "Failed to disable ulv!", result = tmp_result);
4689 static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
4691 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4694 result = vega10_disable_dpm_tasks(hwmgr);
4695 PP_ASSERT_WITH_CODE((0 == result),
4696 "[disable_dpm_tasks] Failed to disable DPM!",
4698 data->water_marks_bitmap &= ~(WaterMarksLoaded);
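/*
 * Find the first enabled GFX and MEM DPM levels that satisfy the requested
 * minimum clocks and return their indices through sclk_idx/mclk_idx.
 */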
4703 static void vega10_find_min_clock_index(struct pp_hwmgr *hwmgr,
4704 uint32_t *sclk_idx, uint32_t *mclk_idx,
4705 uint32_t min_sclk, uint32_t min_mclk)
4707 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4708 struct vega10_dpm_table *dpm_table = &(data->dpm_table);
4711 for (i = 0; i < dpm_table->gfx_table.count; i++) {
4712 if (dpm_table->gfx_table.dpm_levels[i].enabled &&
4713 dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
4719 for (i = 0; i < dpm_table->mem_table.count; i++) {
4720 if (dpm_table->mem_table.dpm_levels[i].enabled &&
4721 dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
4728 static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
4729 struct amd_pp_profile *request)
4731 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4732 uint32_t sclk_idx = ~0, mclk_idx = ~0;
4734 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
4737 vega10_find_min_clock_index(hwmgr, &sclk_idx, &mclk_idx,
4738 request->min_sclk, request->min_mclk);
4740 if (sclk_idx != ~0) {
4741 if (!data->registry_data.sclk_dpm_key_disabled)
4742 PP_ASSERT_WITH_CODE(
4743 !smum_send_msg_to_smc_with_parameter(
4745 PPSMC_MSG_SetSoftMinGfxclkByIndex,
4747 "Failed to set soft min sclk index!",
4751 if (mclk_idx != ~0) {
4752 if (!data->registry_data.mclk_dpm_key_disabled)
4753 PP_ASSERT_WITH_CODE(
4754 !smum_send_msg_to_smc_with_parameter(
4756 PPSMC_MSG_SetSoftMinUclkByIndex,
4758 "Failed to set soft min mclk index!",
4765 static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
4767 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4768 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4769 struct vega10_single_dpm_table *golden_sclk_table =
4770 &(data->golden_dpm_table.gfx_table);
4773 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
4774 golden_sclk_table->dpm_levels
4775 [golden_sclk_table->count - 1].value) *
4777 golden_sclk_table->dpm_levels
4778 [golden_sclk_table->count - 1].value;
4783 static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4785 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4786 struct vega10_single_dpm_table *golden_sclk_table =
4787 &(data->golden_dpm_table.gfx_table);
4788 struct pp_power_state *ps;
4789 struct vega10_power_state *vega10_ps;
4791 ps = hwmgr->request_ps;
4796 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4798 vega10_ps->performance_levels
4799 [vega10_ps->performance_level_count - 1].gfx_clock =
4800 golden_sclk_table->dpm_levels
4801 [golden_sclk_table->count - 1].value *
4803 golden_sclk_table->dpm_levels
4804 [golden_sclk_table->count - 1].value;
4806 if (vega10_ps->performance_levels
4807 [vega10_ps->performance_level_count - 1].gfx_clock >
4808 hwmgr->platform_descriptor.overdriveLimit.engineClock)
4809 vega10_ps->performance_levels
4810 [vega10_ps->performance_level_count - 1].gfx_clock =
4811 hwmgr->platform_descriptor.overdriveLimit.engineClock;
4816 static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
4818 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4819 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4820 struct vega10_single_dpm_table *golden_mclk_table =
4821 &(data->golden_dpm_table.mem_table);
4824 value = (mclk_table->dpm_levels
4825 [mclk_table->count - 1].value -
4826 golden_mclk_table->dpm_levels
4827 [golden_mclk_table->count - 1].value) *
4829 golden_mclk_table->dpm_levels
4830 [golden_mclk_table->count - 1].value;
4835 static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4837 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4838 struct vega10_single_dpm_table *golden_mclk_table =
4839 &(data->golden_dpm_table.mem_table);
4840 struct pp_power_state *ps;
4841 struct vega10_power_state *vega10_ps;
4843 ps = hwmgr->request_ps;
4848 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4850 vega10_ps->performance_levels
4851 [vega10_ps->performance_level_count - 1].mem_clock =
4852 golden_mclk_table->dpm_levels
4853 [golden_mclk_table->count - 1].value *
4855 golden_mclk_table->dpm_levels
4856 [golden_mclk_table->count - 1].value;
4858 if (vega10_ps->performance_levels
4859 [vega10_ps->performance_level_count - 1].mem_clock >
4860 hwmgr->platform_descriptor.overdriveLimit.memoryClock)
4861 vega10_ps->performance_levels
4862 [vega10_ps->performance_level_count - 1].mem_clock =
4863 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
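/* Dispatch table wiring the Vega10 implementations into the generic hwmgr framework. */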
4868 static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
4869 .backend_init = vega10_hwmgr_backend_init,
4870 .backend_fini = vega10_hwmgr_backend_fini,
4871 .asic_setup = vega10_setup_asic_task,
4872 .dynamic_state_management_enable = vega10_enable_dpm_tasks,
4873 .dynamic_state_management_disable = vega10_disable_dpm_tasks,
4874 .get_num_of_pp_table_entries =
4875 vega10_get_number_of_powerplay_table_entries,
4876 .get_power_state_size = vega10_get_power_state_size,
4877 .get_pp_table_entry = vega10_get_pp_table_entry,
4878 .patch_boot_state = vega10_patch_boot_state,
4879 .apply_state_adjust_rules = vega10_apply_state_adjust_rules,
4880 .power_state_set = vega10_set_power_state_tasks,
4881 .get_sclk = vega10_dpm_get_sclk,
4882 .get_mclk = vega10_dpm_get_mclk,
4883 .notify_smc_display_config_after_ps_adjustment =
4884 vega10_notify_smc_display_config_after_ps_adjustment,
4885 .force_dpm_level = vega10_dpm_force_dpm_level,
4886 .get_temperature = vega10_thermal_get_temperature,
4887 .stop_thermal_controller = vega10_thermal_stop_thermal_controller,
4888 .get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
4889 .get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
4890 .set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
4891 .reset_fan_speed_to_default =
4892 vega10_fan_ctrl_reset_fan_speed_to_default,
4893 .get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
4894 .set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
4895 .uninitialize_thermal_controller =
4896 vega10_thermal_ctrl_uninitialize_thermal_controller,
4897 .set_fan_control_mode = vega10_set_fan_control_mode,
4898 .get_fan_control_mode = vega10_get_fan_control_mode,
4899 .read_sensor = vega10_read_sensor,
4900 .get_dal_power_level = vega10_get_dal_power_level,
4901 .get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
4902 .get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
4903 .set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
4904 .display_clock_voltage_request = vega10_display_clock_voltage_request,
4905 .force_clock_level = vega10_force_clock_level,
4906 .print_clock_levels = vega10_print_clock_levels,
4907 .display_config_changed = vega10_display_configuration_changed_task,
4908 .powergate_uvd = vega10_power_gate_uvd,
4909 .powergate_vce = vega10_power_gate_vce,
4910 .check_states_equal = vega10_check_states_equal,
4911 .check_smc_update_required_for_display_configuration =
4912 vega10_check_smc_update_required_for_display_configuration,
4913 .power_off_asic = vega10_power_off_asic,
4914 .disable_smc_firmware_ctf = vega10_thermal_disable_alert,
4915 .set_power_profile_state = vega10_set_power_profile_state,
4916 .get_sclk_od = vega10_get_sclk_od,
4917 .set_sclk_od = vega10_set_sclk_od,
4918 .get_mclk_od = vega10_get_mclk_od,
4919 .set_mclk_od = vega10_set_mclk_od,
4920 .avfs_control = vega10_avfs_enable,
4923 int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
4925 hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
4926 hwmgr->pptable_func = &vega10_pptable_funcs;
4927 pp_vega10_thermal_initialize(hwmgr);