From: Rex Zhu
Date: Fri, 23 Dec 2016 06:07:25 +0000 (+0800)
Subject: drm/amd/powerplay: delete dpm code for Cz/St.
X-Git-Tag: v4.11-rc1~83^2~31^2~96
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=db7da7aa3a8c6a25964f2216fed35f4bf11ceac1;p=karo-tx-linux.git

drm/amd/powerplay: delete dpm code for Cz/St.

The powerplay implementation has been the default for a while now.

Signed-off-by: Rex Zhu
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 41bd2bf28f4c..dba097ccdd9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -52,8 +52,7 @@ amdgpu-y += \
 # add SMC block
 amdgpu-y += \
 	amdgpu_dpm.o \
-	amdgpu_powerplay.o \
-	cz_smc.o cz_dpm.o
+	amdgpu_powerplay.o
 
 # add DCE block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 95a568df8551..b1921c7da36b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -78,10 +78,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
 		amd_pp->ip_funcs = &kv_dpm_ip_funcs;
 		break;
 #endif
-	case CHIP_CARRIZO:
-	case CHIP_STONEY:
-		amd_pp->ip_funcs = &cz_dpm_ip_funcs;
-		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -102,11 +98,9 @@ static int amdgpu_pp_early_init(void *handle)
 	case CHIP_TONGA:
 	case CHIP_FIJI:
 	case CHIP_TOPAZ:
-		adev->pp_enabled = true;
-		break;
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
-		adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
+		adev->pp_enabled = true;
 		break;
 	/* These chips don't have powerplay implemenations */
 	case CHIP_BONAIRE:
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
deleted file mode 100644
index ba2b66be9022..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ /dev/null
@@ -1,2320 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- * - */ - -#include -#include -#include "drmP.h" -#include "amdgpu.h" -#include "amdgpu_pm.h" -#include "amdgpu_atombios.h" -#include "vid.h" -#include "vi_dpm.h" -#include "amdgpu_dpm.h" -#include "cz_dpm.h" -#include "cz_ppsmc.h" -#include "atom.h" - -#include "smu/smu_8_0_d.h" -#include "smu/smu_8_0_sh_mask.h" -#include "gca/gfx_8_0_d.h" -#include "gca/gfx_8_0_sh_mask.h" -#include "gmc/gmc_8_1_d.h" -#include "bif/bif_5_1_d.h" -#include "gfx_v8_0.h" - -static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate); -static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); -static void cz_dpm_fini(struct amdgpu_device *adev); - -static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps) -{ - struct cz_ps *ps = rps->ps_priv; - - return ps; -} - -static struct cz_power_info *cz_get_pi(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = adev->pm.dpm.priv; - - return pi; -} - -static uint16_t cz_convert_8bit_index_to_voltage(struct amdgpu_device *adev, - uint16_t voltage) -{ - uint16_t tmp = 6200 - voltage * 25; - - return tmp; -} - -static void cz_construct_max_power_limits_table(struct amdgpu_device *adev, - struct amdgpu_clock_and_voltage_limits *table) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *dep_table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - - if (dep_table->count > 0) { - table->sclk = dep_table->entries[dep_table->count - 1].clk; - table->vddc = cz_convert_8bit_index_to_voltage(adev, - dep_table->entries[dep_table->count - 1].v); - } - - table->mclk = pi->sys_info.nbp_memory_clock[0]; - -} - -union igp_info { - struct _ATOM_INTEGRATED_SYSTEM_INFO info; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9; -}; - -static int cz_parse_sys_info_table(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_mode_info *mode_info = &adev->mode_info; - int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); - union igp_info *igp_info; - u8 frev, crev; - u16 data_offset; - int i = 0; - - if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) { - igp_info = (union igp_info *)(mode_info->atom_context->bios + - data_offset); - - if (crev != 9) { - DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); - return -EINVAL; - } - pi->sys_info.bootup_sclk = - le32_to_cpu(igp_info->info_9.ulBootUpEngineClock); - pi->sys_info.bootup_uma_clk = - le32_to_cpu(igp_info->info_9.ulBootUpUMAClock); - pi->sys_info.dentist_vco_freq = - le32_to_cpu(igp_info->info_9.ulDentistVCOFreq); - pi->sys_info.bootup_nb_voltage_index = - le16_to_cpu(igp_info->info_9.usBootUpNBVoltage); - - if (igp_info->info_9.ucHtcTmpLmt == 0) - pi->sys_info.htc_tmp_lmt = 203; - else - pi->sys_info.htc_tmp_lmt = igp_info->info_9.ucHtcTmpLmt; - - if (igp_info->info_9.ucHtcHystLmt == 0) - pi->sys_info.htc_hyst_lmt = 5; - else - pi->sys_info.htc_hyst_lmt = igp_info->info_9.ucHtcHystLmt; - - if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { - DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); - return -EINVAL; - } - - if (le32_to_cpu(igp_info->info_9.ulSystemConfig) & (1 << 3) && - pi->enable_nb_ps_policy) - pi->sys_info.nb_dpm_enable = true; - else - pi->sys_info.nb_dpm_enable = false; - - for (i = 0; i < CZ_NUM_NBPSTATES; i++) { - if (i < CZ_NUM_NBPMEMORY_CLOCK) - pi->sys_info.nbp_memory_clock[i] = - 
le32_to_cpu(igp_info->info_9.ulNbpStateMemclkFreq[i]); - pi->sys_info.nbp_n_clock[i] = - le32_to_cpu(igp_info->info_9.ulNbpStateNClkFreq[i]); - } - - for (i = 0; i < CZ_MAX_DISPLAY_CLOCK_LEVEL; i++) - pi->sys_info.display_clock[i] = - le32_to_cpu(igp_info->info_9.sDispClkVoltageMapping[i].ulMaximumSupportedCLK); - - for (i = 0; i < CZ_NUM_NBPSTATES; i++) - pi->sys_info.nbp_voltage_index[i] = - le32_to_cpu(igp_info->info_9.usNBPStateVoltage[i]); - - if (le32_to_cpu(igp_info->info_9.ulGPUCapInfo) & - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) - pi->caps_enable_dfs_bypass = true; - - pi->sys_info.uma_channel_number = - igp_info->info_9.ucUMAChannelNumber; - - cz_construct_max_power_limits_table(adev, - &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); - } - - return 0; -} - -static void cz_patch_voltage_values(struct amdgpu_device *adev) -{ - int i; - struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - struct amdgpu_vce_clock_voltage_dependency_table *vce_table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - struct amdgpu_clock_voltage_dependency_table *acp_table = - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - - if (uvd_table->count) { - for (i = 0; i < uvd_table->count; i++) - uvd_table->entries[i].v = - cz_convert_8bit_index_to_voltage(adev, - uvd_table->entries[i].v); - } - - if (vce_table->count) { - for (i = 0; i < vce_table->count; i++) - vce_table->entries[i].v = - cz_convert_8bit_index_to_voltage(adev, - vce_table->entries[i].v); - } - - if (acp_table->count) { - for (i = 0; i < acp_table->count; i++) - acp_table->entries[i].v = - cz_convert_8bit_index_to_voltage(adev, - acp_table->entries[i].v); - } - -} - -static void cz_construct_boot_state(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - pi->boot_pl.sclk = pi->sys_info.bootup_sclk; - pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; - pi->boot_pl.ds_divider_index = 0; - pi->boot_pl.ss_divider_index = 0; - pi->boot_pl.allow_gnb_slow = 1; - pi->boot_pl.force_nbp_state = 0; - pi->boot_pl.display_wm = 0; - pi->boot_pl.vce_wm = 0; - -} - -static void cz_patch_boot_state(struct amdgpu_device *adev, - struct cz_ps *ps) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - ps->num_levels = 1; - ps->levels[0] = pi->boot_pl; -} - -union pplib_clock_info { - struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; - struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; - struct _ATOM_PPLIB_CZ_CLOCK_INFO carrizo; -}; - -static void cz_parse_pplib_clock_info(struct amdgpu_device *adev, - struct amdgpu_ps *rps, int index, - union pplib_clock_info *clock_info) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct cz_ps *ps = cz_get_ps(rps); - struct cz_pl *pl = &ps->levels[index]; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - - pl->sclk = table->entries[clock_info->carrizo.index].clk; - pl->vddc_index = table->entries[clock_info->carrizo.index].v; - - ps->num_levels = index + 1; - - if (pi->caps_sclk_ds) { - pl->ds_divider_index = 5; - pl->ss_divider_index = 5; - } - -} - -static void cz_parse_pplib_non_clock_info(struct amdgpu_device *adev, - struct amdgpu_ps *rps, - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, - u8 table_rev) -{ - struct cz_ps *ps = cz_get_ps(rps); - - rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); - rps->class = le16_to_cpu(non_clock_info->usClassification); - rps->class2 = 
le16_to_cpu(non_clock_info->usClassification2); - - if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { - rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); - rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); - } else { - rps->vclk = 0; - rps->dclk = 0; - } - - if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { - adev->pm.dpm.boot_ps = rps; - cz_patch_boot_state(adev, ps); - } - if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - adev->pm.dpm.uvd_ps = rps; - -} - -union power_info { - struct _ATOM_PPLIB_POWERPLAYTABLE pplib; - struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; - struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; - struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; - struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; -}; - -union pplib_power_state { - struct _ATOM_PPLIB_STATE v1; - struct _ATOM_PPLIB_STATE_V2 v2; -}; - -static int cz_parse_power_table(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; - union pplib_power_state *power_state; - int i, j, k, non_clock_array_index, clock_array_index; - union pplib_clock_info *clock_info; - struct _StateArray *state_array; - struct _ClockInfoArray *clock_info_array; - struct _NonClockInfoArray *non_clock_info_array; - union power_info *power_info; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - u16 data_offset; - u8 frev, crev; - u8 *power_state_offset; - struct cz_ps *ps; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return -EINVAL; - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); - - state_array = (struct _StateArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usStateArrayOffset)); - clock_info_array = (struct _ClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); - non_clock_info_array = (struct _NonClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); - - adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * - state_array->ucNumEntries, GFP_KERNEL); - - if (!adev->pm.dpm.ps) - return -ENOMEM; - - power_state_offset = (u8 *)state_array->states; - adev->pm.dpm.platform_caps = - le32_to_cpu(power_info->pplib.ulPlatformCaps); - adev->pm.dpm.backbias_response_time = - le16_to_cpu(power_info->pplib.usBackbiasTime); - adev->pm.dpm.voltage_response_time = - le16_to_cpu(power_info->pplib.usVoltageTime); - - for (i = 0; i < state_array->ucNumEntries; i++) { - power_state = (union pplib_power_state *)power_state_offset; - non_clock_array_index = power_state->v2.nonClockInfoIndex; - non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) - &non_clock_info_array->nonClockInfo[non_clock_array_index]; - - ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL); - if (ps == NULL) { - for (j = 0; j < i; j++) - kfree(adev->pm.dpm.ps[j].ps_priv); - kfree(adev->pm.dpm.ps); - return -ENOMEM; - } - - adev->pm.dpm.ps[i].ps_priv = ps; - k = 0; - for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { - clock_array_index = power_state->v2.clockInfoIndex[j]; - if (clock_array_index >= clock_info_array->ucNumEntries) - continue; - if (k >= CZ_MAX_HARDWARE_POWERLEVELS) - break; - clock_info = (union pplib_clock_info *) - &clock_info_array->clockInfo[clock_array_index * - clock_info_array->ucEntrySize]; - cz_parse_pplib_clock_info(adev, &adev->pm.dpm.ps[i], - k, clock_info); - k++; - } - cz_parse_pplib_non_clock_info(adev, 
&adev->pm.dpm.ps[i], - non_clock_info, - non_clock_info_array->ucEntrySize); - power_state_offset += 2 + power_state->v2.ucNumDPMLevels; - } - adev->pm.dpm.num_ps = state_array->ucNumEntries; - - return 0; -} - -static int cz_process_firmware_header(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - u32 tmp; - int ret; - - ret = cz_read_smc_sram_dword(adev, SMU8_FIRMWARE_HEADER_LOCATION + - offsetof(struct SMU8_Firmware_Header, - DpmTable), - &tmp, pi->sram_end); - - if (ret == 0) - pi->dpm_table_start = tmp; - - return ret; -} - -static int cz_dpm_init(struct amdgpu_device *adev) -{ - struct cz_power_info *pi; - int ret, i; - - pi = kzalloc(sizeof(struct cz_power_info), GFP_KERNEL); - if (NULL == pi) - return -ENOMEM; - - adev->pm.dpm.priv = pi; - - ret = amdgpu_get_platform_caps(adev); - if (ret) - goto err; - - ret = amdgpu_parse_extended_power_table(adev); - if (ret) - goto err; - - pi->sram_end = SMC_RAM_END; - - /* set up DPM defaults */ - for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) - pi->active_target[i] = CZ_AT_DFLT; - - pi->mgcg_cgtt_local0 = 0x0; - pi->mgcg_cgtt_local1 = 0x0; - pi->clock_slow_down_step = 25000; - pi->skip_clock_slow_down = 1; - pi->enable_nb_ps_policy = false; - pi->caps_power_containment = true; - pi->caps_cac = true; - pi->didt_enabled = false; - if (pi->didt_enabled) { - pi->caps_sq_ramping = true; - pi->caps_db_ramping = true; - pi->caps_td_ramping = true; - pi->caps_tcp_ramping = true; - } - if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK) - pi->caps_sclk_ds = true; - else - pi->caps_sclk_ds = false; - - pi->voting_clients = 0x00c00033; - pi->auto_thermal_throttling_enabled = true; - pi->bapm_enabled = false; - pi->disable_nb_ps3_in_battery = false; - pi->voltage_drop_threshold = 0; - pi->caps_sclk_throttle_low_notification = false; - pi->gfx_pg_threshold = 500; - pi->caps_fps = true; - /* uvd */ - pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; - pi->caps_uvd_dpm = true; - /* vce */ - pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; - pi->caps_vce_dpm = true; - /* acp */ - pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? 
true : false; - pi->caps_acp_dpm = true; - - pi->caps_stable_power_state = false; - pi->nb_dpm_enabled_by_driver = true; - pi->nb_dpm_enabled = false; - pi->caps_voltage_island = false; - /* flags which indicate need to upload pptable */ - pi->need_pptable_upload = true; - - ret = cz_parse_sys_info_table(adev); - if (ret) - goto err; - - cz_patch_voltage_values(adev); - cz_construct_boot_state(adev); - - ret = cz_parse_power_table(adev); - if (ret) - goto err; - - ret = cz_process_firmware_header(adev); - if (ret) - goto err; - - pi->dpm_enabled = true; - pi->uvd_dynamic_pg = false; - - return 0; -err: - cz_dpm_fini(adev); - return ret; -} - -static void cz_dpm_fini(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < adev->pm.dpm.num_ps; i++) - kfree(adev->pm.dpm.ps[i].ps_priv); - - kfree(adev->pm.dpm.ps); - kfree(adev->pm.dpm.priv); - amdgpu_free_extended_power_table(adev); -} - -#define ixSMUSVI_NB_CURRENTVID 0xD8230044 -#define CURRENT_NB_VID_MASK 0xff000000 -#define CURRENT_NB_VID__SHIFT 24 -#define ixSMUSVI_GFX_CURRENTVID 0xD8230048 -#define CURRENT_GFX_VID_MASK 0xff000000 -#define CURRENT_GFX_VID__SHIFT 24 - -static void -cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, - struct seq_file *m) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - struct amdgpu_vce_clock_voltage_dependency_table *vce_table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX), - TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX); - u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2), - TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX); - u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2), - TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX); - u32 sclk, vclk, dclk, ecclk, tmp; - u16 vddnb, vddgfx; - - if (sclk_index >= NUM_SCLK_LEVELS) { - seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index); - } else { - sclk = table->entries[sclk_index].clk; - seq_printf(m, "%u sclk: %u\n", sclk_index, sclk); - } - - tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) & - CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; - vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); - tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) & - CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; - vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); - seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx); - - seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); - if (!pi->uvd_power_gated) { - if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { - seq_printf(m, "invalid uvd dpm level %d\n", uvd_index); - } else { - vclk = uvd_table->entries[uvd_index].vclk; - dclk = uvd_table->entries[uvd_index].dclk; - seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk); - } - } - - seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? 
"dis" : "en"); - if (!pi->vce_power_gated) { - if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) { - seq_printf(m, "invalid vce dpm level %d\n", vce_index); - } else { - ecclk = vce_table->entries[vce_index].ecclk; - seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk); - } - } -} - -static void cz_dpm_print_power_state(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - int i; - struct cz_ps *ps = cz_get_ps(rps); - - amdgpu_dpm_print_class_info(rps->class, rps->class2); - amdgpu_dpm_print_cap_info(rps->caps); - - DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); - for (i = 0; i < ps->num_levels; i++) { - struct cz_pl *pl = &ps->levels[i]; - - DRM_INFO("\t\tpower level %d sclk: %u vddc: %u\n", - i, pl->sclk, - cz_convert_8bit_index_to_voltage(adev, pl->vddc_index)); - } - - amdgpu_dpm_print_ps_status(adev, rps); -} - -static void cz_dpm_set_funcs(struct amdgpu_device *adev); - -static int cz_dpm_early_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - cz_dpm_set_funcs(adev); - - return 0; -} - - -static int cz_dpm_late_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (amdgpu_dpm) { - int ret; - /* init the sysfs and debugfs files late */ - ret = amdgpu_pm_sysfs_init(adev); - if (ret) - return ret; - - /* powerdown unused blocks for now */ - cz_dpm_powergate_uvd(adev, true); - cz_dpm_powergate_vce(adev, true); - } - - return 0; -} - -static int cz_dpm_sw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int ret = 0; - /* fix me to add thermal support TODO */ - - /* default to balanced state */ - adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; - adev->pm.default_sclk = adev->clock.default_sclk; - adev->pm.default_mclk = adev->clock.default_mclk; - adev->pm.current_sclk = adev->clock.default_sclk; - adev->pm.current_mclk = adev->clock.default_mclk; - adev->pm.int_thermal_type = THERMAL_TYPE_NONE; - - if (amdgpu_dpm == 0) - return 0; - - mutex_lock(&adev->pm.mutex); - ret = cz_dpm_init(adev); - if (ret) - goto dpm_init_failed; - - adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; - if (amdgpu_dpm == 1) - amdgpu_pm_print_power_states(adev); - - mutex_unlock(&adev->pm.mutex); - DRM_INFO("amdgpu: dpm initialized\n"); - - return 0; - -dpm_init_failed: - cz_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); - DRM_ERROR("amdgpu: dpm initialization failed\n"); - - return ret; -} - -static int cz_dpm_sw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - amdgpu_pm_sysfs_fini(adev); - cz_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); - - return 0; -} - -static void cz_reset_ap_mask(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - pi->active_process_mask = 0; -} - -static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev, - void **table) -{ - return cz_smu_download_pptable(adev, table); -} - -static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct SMU8_Fusion_ClkTable *clock_table; - struct atom_clock_dividers dividers; - void *table = NULL; - uint8_t i = 0; - int ret = 0; - - struct amdgpu_clock_voltage_dependency_table *vddc_table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - struct amdgpu_clock_voltage_dependency_table *vddgfx_table = - 
&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk; - struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - struct amdgpu_vce_clock_voltage_dependency_table *vce_table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - struct amdgpu_clock_voltage_dependency_table *acp_table = - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - - if (!pi->need_pptable_upload) - return 0; - - ret = cz_dpm_download_pptable_from_smu(adev, &table); - if (ret) { - DRM_ERROR("amdgpu: Failed to get power play table from SMU!\n"); - return -EINVAL; - } - - clock_table = (struct SMU8_Fusion_ClkTable *)table; - /* patch clock table */ - if (vddc_table->count > CZ_MAX_HARDWARE_POWERLEVELS || - vddgfx_table->count > CZ_MAX_HARDWARE_POWERLEVELS || - uvd_table->count > CZ_MAX_HARDWARE_POWERLEVELS || - vce_table->count > CZ_MAX_HARDWARE_POWERLEVELS || - acp_table->count > CZ_MAX_HARDWARE_POWERLEVELS) { - DRM_ERROR("amdgpu: Invalid Clock Voltage Dependency Table!\n"); - return -EINVAL; - } - - for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) { - - /* vddc sclk */ - clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid = - (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0; - clock_table->SclkBreakdownTable.ClkLevel[i].Frequency = - (i < vddc_table->count) ? vddc_table->entries[i].clk : 0; - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, - clock_table->SclkBreakdownTable.ClkLevel[i].Frequency, - false, ÷rs); - if (ret) - return ret; - clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid = - (uint8_t)dividers.post_divider; - - /* vddgfx sclk */ - clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid = - (i < vddgfx_table->count) ? (uint8_t)vddgfx_table->entries[i].v : 0; - - /* acp breakdown */ - clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid = - (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0; - clock_table->AclkBreakdownTable.ClkLevel[i].Frequency = - (i < acp_table->count) ? acp_table->entries[i].clk : 0; - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, - clock_table->SclkBreakdownTable.ClkLevel[i].Frequency, - false, ÷rs); - if (ret) - return ret; - clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid = - (uint8_t)dividers.post_divider; - - /* uvd breakdown */ - clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid = - (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0; - clock_table->VclkBreakdownTable.ClkLevel[i].Frequency = - (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0; - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, - clock_table->VclkBreakdownTable.ClkLevel[i].Frequency, - false, ÷rs); - if (ret) - return ret; - clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid = - (uint8_t)dividers.post_divider; - - clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid = - (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0; - clock_table->DclkBreakdownTable.ClkLevel[i].Frequency = - (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0; - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, - clock_table->DclkBreakdownTable.ClkLevel[i].Frequency, - false, ÷rs); - if (ret) - return ret; - clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid = - (uint8_t)dividers.post_divider; - - /* vce breakdown */ - clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid = - (i < vce_table->count) ? 
(uint8_t)vce_table->entries[i].v : 0; - clock_table->EclkBreakdownTable.ClkLevel[i].Frequency = - (i < vce_table->count) ? vce_table->entries[i].ecclk : 0; - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, - clock_table->EclkBreakdownTable.ClkLevel[i].Frequency, - false, ÷rs); - if (ret) - return ret; - clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid = - (uint8_t)dividers.post_divider; - } - - /* its time to upload to SMU */ - ret = cz_smu_upload_pptable(adev); - if (ret) { - DRM_ERROR("amdgpu: Failed to put power play table to SMU!\n"); - return ret; - } - - return 0; -} - -static void cz_init_sclk_limit(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - uint32_t clock = 0, level; - - if (!table || !table->count) { - DRM_ERROR("Invalid Voltage Dependency table.\n"); - return; - } - - pi->sclk_dpm.soft_min_clk = 0; - pi->sclk_dpm.hard_min_clk = 0; - cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel); - level = cz_get_argument(adev); - if (level < table->count) { - clock = table->entries[level].clk; - } else { - DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n"); - clock = table->entries[table->count - 1].clk; - } - - pi->sclk_dpm.soft_max_clk = clock; - pi->sclk_dpm.hard_max_clk = clock; - -} - -static void cz_init_uvd_limit(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_uvd_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - uint32_t clock = 0, level; - - if (!table || !table->count) { - DRM_ERROR("Invalid Voltage Dependency table.\n"); - return; - } - - pi->uvd_dpm.soft_min_clk = 0; - pi->uvd_dpm.hard_min_clk = 0; - cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel); - level = cz_get_argument(adev); - if (level < table->count) { - clock = table->entries[level].vclk; - } else { - DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n"); - clock = table->entries[table->count - 1].vclk; - } - - pi->uvd_dpm.soft_max_clk = clock; - pi->uvd_dpm.hard_max_clk = clock; - -} - -static void cz_init_vce_limit(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_vce_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - uint32_t clock = 0, level; - - if (!table || !table->count) { - DRM_ERROR("Invalid Voltage Dependency table.\n"); - return; - } - - pi->vce_dpm.soft_min_clk = table->entries[0].ecclk; - pi->vce_dpm.hard_min_clk = table->entries[0].ecclk; - cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel); - level = cz_get_argument(adev); - if (level < table->count) { - clock = table->entries[level].ecclk; - } else { - /* future BIOS would fix this error */ - DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n"); - clock = table->entries[table->count - 1].ecclk; - } - - pi->vce_dpm.soft_max_clk = clock; - pi->vce_dpm.hard_max_clk = clock; - -} - -static void cz_init_acp_limit(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - uint32_t clock = 0, level; - - if (!table || !table->count) { - DRM_ERROR("Invalid Voltage Dependency table.\n"); - return; - } - - pi->acp_dpm.soft_min_clk = 0; - pi->acp_dpm.hard_min_clk = 0; - cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel); - level = 
cz_get_argument(adev); - if (level < table->count) { - clock = table->entries[level].clk; - } else { - DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n"); - clock = table->entries[table->count - 1].clk; - } - - pi->acp_dpm.soft_max_clk = clock; - pi->acp_dpm.hard_max_clk = clock; - -} - -static void cz_init_pg_state(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - pi->uvd_power_gated = false; - pi->vce_power_gated = false; - pi->acp_power_gated = false; - -} - -static void cz_init_sclk_threshold(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - pi->low_sclk_interrupt_threshold = 0; -} - -static void cz_dpm_setup_asic(struct amdgpu_device *adev) -{ - cz_reset_ap_mask(adev); - cz_dpm_upload_pptable_to_smu(adev); - cz_init_sclk_limit(adev); - cz_init_uvd_limit(adev); - cz_init_vce_limit(adev); - cz_init_acp_limit(adev); - cz_init_pg_state(adev); - cz_init_sclk_threshold(adev); - -} - -static bool cz_check_smu_feature(struct amdgpu_device *adev, - uint32_t feature) -{ - uint32_t smu_feature = 0; - int ret; - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_GetFeatureStatus, 0); - if (ret) { - DRM_ERROR("Failed to get SMU features from SMC.\n"); - return false; - } else { - smu_feature = cz_get_argument(adev); - if (feature & smu_feature) - return true; - } - - return false; -} - -static bool cz_check_for_dpm_enabled(struct amdgpu_device *adev) -{ - if (cz_check_smu_feature(adev, - SMU_EnabledFeatureScoreboard_SclkDpmOn)) - return true; - - return false; -} - -static void cz_program_voting_clients(struct amdgpu_device *adev) -{ - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0); -} - -static void cz_clear_voting_clients(struct amdgpu_device *adev) -{ - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); -} - -static int cz_start_dpm(struct amdgpu_device *adev) -{ - int ret = 0; - - if (amdgpu_dpm) { - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_EnableAllSmuFeatures, SCLK_DPM_MASK); - if (ret) { - DRM_ERROR("SMU feature: SCLK_DPM enable failed\n"); - return -EINVAL; - } - } - - return 0; -} - -static int cz_stop_dpm(struct amdgpu_device *adev) -{ - int ret = 0; - - if (amdgpu_dpm && adev->pm.dpm_enabled) { - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_DisableAllSmuFeatures, SCLK_DPM_MASK); - if (ret) { - DRM_ERROR("SMU feature: SCLK_DPM disable failed\n"); - return -EINVAL; - } - } - - return 0; -} - -static uint32_t cz_get_sclk_level(struct amdgpu_device *adev, - uint32_t clock, uint16_t msg) -{ - int i = 0; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - - switch (msg) { - case PPSMC_MSG_SetSclkSoftMin: - case PPSMC_MSG_SetSclkHardMin: - for (i = 0; i < table->count; i++) - if (clock <= table->entries[i].clk) - break; - if (i == table->count) - i = table->count - 1; - break; - case PPSMC_MSG_SetSclkSoftMax: - case PPSMC_MSG_SetSclkHardMax: - for (i = table->count - 1; i >= 0; i--) - if (clock >= table->entries[i].clk) - break; - if (i < 0) - i = 0; - break; - default: - break; - } - - return i; -} - -static uint32_t cz_get_eclk_level(struct amdgpu_device *adev, - uint32_t clock, uint16_t msg) -{ - int i = 0; - struct amdgpu_vce_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - - if (table->count == 0) - return 0; - - switch (msg) { - case PPSMC_MSG_SetEclkSoftMin: - case PPSMC_MSG_SetEclkHardMin: - for (i = 0; i < table->count-1; i++) - if (clock <= 
table->entries[i].ecclk) - break; - break; - case PPSMC_MSG_SetEclkSoftMax: - case PPSMC_MSG_SetEclkHardMax: - for (i = table->count - 1; i > 0; i--) - if (clock >= table->entries[i].ecclk) - break; - break; - default: - break; - } - - return i; -} - -static uint32_t cz_get_uvd_level(struct amdgpu_device *adev, - uint32_t clock, uint16_t msg) -{ - int i = 0; - struct amdgpu_uvd_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - - switch (msg) { - case PPSMC_MSG_SetUvdSoftMin: - case PPSMC_MSG_SetUvdHardMin: - for (i = 0; i < table->count; i++) - if (clock <= table->entries[i].vclk) - break; - if (i == table->count) - i = table->count - 1; - break; - case PPSMC_MSG_SetUvdSoftMax: - case PPSMC_MSG_SetUvdHardMax: - for (i = table->count - 1; i >= 0; i--) - if (clock >= table->entries[i].vclk) - break; - if (i < 0) - i = 0; - break; - default: - break; - } - - return i; -} - -static int cz_program_bootup_state(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - uint32_t soft_min_clk = 0; - uint32_t soft_max_clk = 0; - int ret = 0; - - pi->sclk_dpm.soft_min_clk = pi->sys_info.bootup_sclk; - pi->sclk_dpm.soft_max_clk = pi->sys_info.bootup_sclk; - - soft_min_clk = cz_get_sclk_level(adev, - pi->sclk_dpm.soft_min_clk, - PPSMC_MSG_SetSclkSoftMin); - soft_max_clk = cz_get_sclk_level(adev, - pi->sclk_dpm.soft_max_clk, - PPSMC_MSG_SetSclkSoftMax); - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMin, soft_min_clk); - if (ret) - return -EINVAL; - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMax, soft_max_clk); - if (ret) - return -EINVAL; - - return 0; -} - -/* TODO */ -static int cz_disable_cgpg(struct amdgpu_device *adev) -{ - return 0; -} - -/* TODO */ -static int cz_enable_cgpg(struct amdgpu_device *adev) -{ - return 0; -} - -/* TODO */ -static int cz_program_pt_config_registers(struct amdgpu_device *adev) -{ - return 0; -} - -static void cz_do_enable_didt(struct amdgpu_device *adev, bool enable) -{ - struct cz_power_info *pi = cz_get_pi(adev); - uint32_t reg = 0; - - if (pi->caps_sq_ramping) { - reg = RREG32_DIDT(ixDIDT_SQ_CTRL0); - if (enable) - reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1); - else - reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0); - WREG32_DIDT(ixDIDT_SQ_CTRL0, reg); - } - if (pi->caps_db_ramping) { - reg = RREG32_DIDT(ixDIDT_DB_CTRL0); - if (enable) - reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 1); - else - reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 0); - WREG32_DIDT(ixDIDT_DB_CTRL0, reg); - } - if (pi->caps_td_ramping) { - reg = RREG32_DIDT(ixDIDT_TD_CTRL0); - if (enable) - reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 1); - else - reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 0); - WREG32_DIDT(ixDIDT_TD_CTRL0, reg); - } - if (pi->caps_tcp_ramping) { - reg = RREG32_DIDT(ixDIDT_TCP_CTRL0); - if (enable) - reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1); - else - reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0); - WREG32_DIDT(ixDIDT_TCP_CTRL0, reg); - } - -} - -static int cz_enable_didt(struct amdgpu_device *adev, bool enable) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret; - - if (pi->caps_sq_ramping || pi->caps_db_ramping || - pi->caps_td_ramping || pi->caps_tcp_ramping) { - if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) { - ret = cz_disable_cgpg(adev); - if (ret) { - DRM_ERROR("Pre Di/Dt disable cg/pg failed\n"); - return -EINVAL; - } - 
adev->gfx.gfx_current_status = AMDGPU_GFX_SAFE_MODE; - } - - ret = cz_program_pt_config_registers(adev); - if (ret) { - DRM_ERROR("Di/Dt config failed\n"); - return -EINVAL; - } - cz_do_enable_didt(adev, enable); - - if (adev->gfx.gfx_current_status == AMDGPU_GFX_SAFE_MODE) { - ret = cz_enable_cgpg(adev); - if (ret) { - DRM_ERROR("Post Di/Dt enable cg/pg failed\n"); - return -EINVAL; - } - adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; - } - } - - return 0; -} - -/* TODO */ -static void cz_reset_acp_boot_level(struct amdgpu_device *adev) -{ -} - -static void cz_update_current_ps(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct cz_ps *ps = cz_get_ps(rps); - - pi->current_ps = *ps; - pi->current_rps = *rps; - pi->current_rps.ps_priv = &pi->current_ps; - adev->pm.dpm.current_ps = &pi->current_rps; - -} - -static void cz_update_requested_ps(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct cz_ps *ps = cz_get_ps(rps); - - pi->requested_ps = *ps; - pi->requested_rps = *rps; - pi->requested_rps.ps_priv = &pi->requested_ps; - adev->pm.dpm.requested_ps = &pi->requested_rps; - -} - -/* PP arbiter support needed TODO */ -static void cz_apply_state_adjust_rules(struct amdgpu_device *adev, - struct amdgpu_ps *new_rps, - struct amdgpu_ps *old_rps) -{ - struct cz_ps *ps = cz_get_ps(new_rps); - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_clock_and_voltage_limits *limits = - &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - /* 10kHz memory clock */ - uint32_t mclk = 0; - - ps->force_high = false; - ps->need_dfs_bypass = true; - pi->video_start = new_rps->dclk || new_rps->vclk || - new_rps->evclk || new_rps->ecclk; - - if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == - ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) - pi->battery_state = true; - else - pi->battery_state = false; - - if (pi->caps_stable_power_state) - mclk = limits->mclk; - - if (mclk > pi->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK - 1]) - ps->force_high = true; - -} - -static int cz_dpm_enable(struct amdgpu_device *adev) -{ - const char *chip_name; - int ret = 0; - - /* renable will hang up SMU, so check first */ - if (cz_check_for_dpm_enabled(adev)) - return -EINVAL; - - cz_program_voting_clients(adev); - - switch (adev->asic_type) { - case CHIP_CARRIZO: - chip_name = "carrizo"; - break; - case CHIP_STONEY: - chip_name = "stoney"; - break; - default: - BUG(); - } - - - ret = cz_start_dpm(adev); - if (ret) { - DRM_ERROR("%s DPM enable failed\n", chip_name); - return -EINVAL; - } - - ret = cz_program_bootup_state(adev); - if (ret) { - DRM_ERROR("%s bootup state program failed\n", chip_name); - return -EINVAL; - } - - ret = cz_enable_didt(adev, true); - if (ret) { - DRM_ERROR("%s enable di/dt failed\n", chip_name); - return -EINVAL; - } - - cz_reset_acp_boot_level(adev); - cz_update_current_ps(adev, adev->pm.dpm.boot_ps); - - return 0; -} - -static int cz_dpm_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int ret = 0; - - mutex_lock(&adev->pm.mutex); - - /* smu init only needs to be called at startup, not resume. - * It should be in sw_init, but requires the fw info gathered - * in sw_init from other IP modules. 
- */ - ret = cz_smu_init(adev); - if (ret) { - DRM_ERROR("amdgpu: smc initialization failed\n"); - mutex_unlock(&adev->pm.mutex); - return ret; - } - - /* do the actual fw loading */ - ret = cz_smu_start(adev); - if (ret) { - DRM_ERROR("amdgpu: smc start failed\n"); - mutex_unlock(&adev->pm.mutex); - return ret; - } - - if (!amdgpu_dpm) { - adev->pm.dpm_enabled = false; - mutex_unlock(&adev->pm.mutex); - return ret; - } - - /* cz dpm setup asic */ - cz_dpm_setup_asic(adev); - - /* cz dpm enable */ - ret = cz_dpm_enable(adev); - if (ret) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - - mutex_unlock(&adev->pm.mutex); - - return 0; -} - -static int cz_dpm_disable(struct amdgpu_device *adev) -{ - int ret = 0; - - if (!cz_check_for_dpm_enabled(adev)) - return -EINVAL; - - ret = cz_enable_didt(adev, false); - if (ret) { - DRM_ERROR("disable di/dt failed\n"); - return -EINVAL; - } - - /* powerup blocks */ - cz_dpm_powergate_uvd(adev, false); - cz_dpm_powergate_vce(adev, false); - - cz_clear_voting_clients(adev); - cz_stop_dpm(adev); - cz_update_current_ps(adev, adev->pm.dpm.boot_ps); - - return 0; -} - -static int cz_dpm_hw_fini(void *handle) -{ - int ret = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - - /* smu fini only needs to be called at teardown, not suspend. - * It should be in sw_fini, but we put it here for symmetry - * with smu init. - */ - cz_smu_fini(adev); - - if (adev->pm.dpm_enabled) { - ret = cz_dpm_disable(adev); - - adev->pm.dpm.current_ps = - adev->pm.dpm.requested_ps = - adev->pm.dpm.boot_ps; - } - - adev->pm.dpm_enabled = false; - - mutex_unlock(&adev->pm.mutex); - - return ret; -} - -static int cz_dpm_suspend(void *handle) -{ - int ret = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); - - ret = cz_dpm_disable(adev); - - adev->pm.dpm.current_ps = - adev->pm.dpm.requested_ps = - adev->pm.dpm.boot_ps; - - mutex_unlock(&adev->pm.mutex); - } - - return ret; -} - -static int cz_dpm_resume(void *handle) -{ - int ret = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - - /* do the actual fw loading */ - ret = cz_smu_start(adev); - if (ret) { - DRM_ERROR("amdgpu: smc start failed\n"); - mutex_unlock(&adev->pm.mutex); - return ret; - } - - if (!amdgpu_dpm) { - adev->pm.dpm_enabled = false; - mutex_unlock(&adev->pm.mutex); - return ret; - } - - /* cz dpm setup asic */ - cz_dpm_setup_asic(adev); - - /* cz dpm enable */ - ret = cz_dpm_enable(adev); - if (ret) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - - mutex_unlock(&adev->pm.mutex); - /* upon resume, re-compute the clocks */ - if (adev->pm.dpm_enabled) - amdgpu_pm_compute_clocks(adev); - - return 0; -} - -static int cz_dpm_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - return 0; -} - -static int cz_dpm_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - return 0; -} - -static int cz_dpm_get_temperature(struct amdgpu_device *adev) -{ - int actual_temp = 0; - uint32_t val = RREG32_SMC(ixTHM_TCON_CUR_TMP); - uint32_t temp = REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP); - - if (REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL)) - actual_temp = 1000 * ((temp / 8) - 49); - else - actual_temp = 1000 * (temp / 8); - - return actual_temp; -} - -static int cz_dpm_pre_set_power_state(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = 
cz_get_pi(adev); - struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; - struct amdgpu_ps *new_ps = &requested_ps; - - cz_update_requested_ps(adev, new_ps); - cz_apply_state_adjust_rules(adev, &pi->requested_rps, - &pi->current_rps); - - return 0; -} - -static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_clock_and_voltage_limits *limits = - &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - uint32_t clock, stable_ps_clock = 0; - - clock = pi->sclk_dpm.soft_min_clk; - - if (pi->caps_stable_power_state) { - stable_ps_clock = limits->sclk * 75 / 100; - if (clock < stable_ps_clock) - clock = stable_ps_clock; - } - - if (clock != pi->sclk_dpm.soft_min_clk) { - pi->sclk_dpm.soft_min_clk = clock; - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMin, - cz_get_sclk_level(adev, clock, - PPSMC_MSG_SetSclkSoftMin)); - } - - if (pi->caps_stable_power_state && - pi->sclk_dpm.soft_max_clk != clock) { - pi->sclk_dpm.soft_max_clk = clock; - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMax, - cz_get_sclk_level(adev, clock, - PPSMC_MSG_SetSclkSoftMax)); - } else { - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMax, - cz_get_sclk_level(adev, - pi->sclk_dpm.soft_max_clk, - PPSMC_MSG_SetSclkSoftMax)); - } - - return 0; -} - -static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - if (pi->caps_sclk_ds) { - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetMinDeepSleepSclk, - CZ_MIN_DEEP_SLEEP_SCLK); - } - - return 0; -} - -/* ?? without dal support, is this still needed in setpowerstate list*/ -static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetWatermarkFrequency, - pi->sclk_dpm.soft_max_clk); - - return 0; -} - -static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev) -{ - int ret = 0; - struct cz_power_info *pi = cz_get_pi(adev); - - /* also depend on dal NBPStateDisableRequired */ - if (pi->nb_dpm_enabled_by_driver && !pi->nb_dpm_enabled) { - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_EnableAllSmuFeatures, - NB_DPM_MASK); - if (ret) { - DRM_ERROR("amdgpu: nb dpm enable failed\n"); - return ret; - } - pi->nb_dpm_enabled = true; - } - - return ret; -} - -static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev, - bool enable) -{ - if (enable) - cz_send_msg_to_smc(adev, PPSMC_MSG_EnableLowMemoryPstate); - else - cz_send_msg_to_smc(adev, PPSMC_MSG_DisableLowMemoryPstate); - -} - -static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct cz_ps *ps = &pi->requested_ps; - - if (pi->sys_info.nb_dpm_enable) { - if (ps->force_high) - cz_dpm_nbdpm_lm_pstate_enable(adev, false); - else - cz_dpm_nbdpm_lm_pstate_enable(adev, true); - } - - return 0; -} - -/* with dpm enabled */ -static int cz_dpm_set_power_state(struct amdgpu_device *adev) -{ - cz_dpm_update_sclk_limit(adev); - cz_dpm_set_deep_sleep_sclk_threshold(adev); - cz_dpm_set_watermark_threshold(adev); - cz_dpm_enable_nbdpm(adev); - cz_dpm_update_low_memory_pstate(adev); - - return 0; -} - -static void cz_dpm_post_set_power_state(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_ps *ps = &pi->requested_rps; - - cz_update_current_ps(adev, ps); -} - -static int cz_dpm_force_highest(struct 
amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (pi->sclk_dpm.soft_min_clk != pi->sclk_dpm.soft_max_clk) { - pi->sclk_dpm.soft_min_clk = - pi->sclk_dpm.soft_max_clk; - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMin, - cz_get_sclk_level(adev, - pi->sclk_dpm.soft_min_clk, - PPSMC_MSG_SetSclkSoftMin)); - if (ret) - return ret; - } - - return ret; -} - -static int cz_dpm_force_lowest(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (pi->sclk_dpm.soft_max_clk != pi->sclk_dpm.soft_min_clk) { - pi->sclk_dpm.soft_max_clk = pi->sclk_dpm.soft_min_clk; - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMax, - cz_get_sclk_level(adev, - pi->sclk_dpm.soft_max_clk, - PPSMC_MSG_SetSclkSoftMax)); - if (ret) - return ret; - } - - return ret; -} - -static uint32_t cz_dpm_get_max_sclk_level(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - if (!pi->max_sclk_level) { - cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel); - pi->max_sclk_level = cz_get_argument(adev) + 1; - } - - if (pi->max_sclk_level > CZ_MAX_HARDWARE_POWERLEVELS) { - DRM_ERROR("Invalid max sclk level!\n"); - return -EINVAL; - } - - return pi->max_sclk_level; -} - -static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *dep_table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - uint32_t level = 0; - int ret = 0; - - pi->sclk_dpm.soft_min_clk = dep_table->entries[0].clk; - level = cz_dpm_get_max_sclk_level(adev) - 1; - if (level < dep_table->count) - pi->sclk_dpm.soft_max_clk = dep_table->entries[level].clk; - else - pi->sclk_dpm.soft_max_clk = - dep_table->entries[dep_table->count - 1].clk; - - /* get min/max sclk soft value - * notify SMU to execute */ - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMin, - cz_get_sclk_level(adev, - pi->sclk_dpm.soft_min_clk, - PPSMC_MSG_SetSclkSoftMin)); - if (ret) - return ret; - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMax, - cz_get_sclk_level(adev, - pi->sclk_dpm.soft_max_clk, - PPSMC_MSG_SetSclkSoftMax)); - if (ret) - return ret; - - DRM_DEBUG("DPM unforce state min=%d, max=%d.\n", - pi->sclk_dpm.soft_min_clk, - pi->sclk_dpm.soft_max_clk); - - return 0; -} - -static int cz_dpm_uvd_force_highest(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (pi->uvd_dpm.soft_min_clk != pi->uvd_dpm.soft_max_clk) { - pi->uvd_dpm.soft_min_clk = - pi->uvd_dpm.soft_max_clk; - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetUvdSoftMin, - cz_get_uvd_level(adev, - pi->uvd_dpm.soft_min_clk, - PPSMC_MSG_SetUvdSoftMin)); - if (ret) - return ret; - } - - return ret; -} - -static int cz_dpm_uvd_force_lowest(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (pi->uvd_dpm.soft_max_clk != pi->uvd_dpm.soft_min_clk) { - pi->uvd_dpm.soft_max_clk = pi->uvd_dpm.soft_min_clk; - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetUvdSoftMax, - cz_get_uvd_level(adev, - pi->uvd_dpm.soft_max_clk, - PPSMC_MSG_SetUvdSoftMax)); - if (ret) - return ret; - } - - return ret; -} - -static uint32_t cz_dpm_get_max_uvd_level(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - if (!pi->max_uvd_level) { - cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel); - pi->max_uvd_level = 
cz_get_argument(adev) + 1; - } - - if (pi->max_uvd_level > CZ_MAX_HARDWARE_POWERLEVELS) { - DRM_ERROR("Invalid max uvd level!\n"); - return -EINVAL; - } - - return pi->max_uvd_level; -} - -static int cz_dpm_unforce_uvd_dpm_levels(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_uvd_clock_voltage_dependency_table *dep_table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - uint32_t level = 0; - int ret = 0; - - pi->uvd_dpm.soft_min_clk = dep_table->entries[0].vclk; - level = cz_dpm_get_max_uvd_level(adev) - 1; - if (level < dep_table->count) - pi->uvd_dpm.soft_max_clk = dep_table->entries[level].vclk; - else - pi->uvd_dpm.soft_max_clk = - dep_table->entries[dep_table->count - 1].vclk; - - /* get min/max sclk soft value - * notify SMU to execute */ - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetUvdSoftMin, - cz_get_uvd_level(adev, - pi->uvd_dpm.soft_min_clk, - PPSMC_MSG_SetUvdSoftMin)); - if (ret) - return ret; - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetUvdSoftMax, - cz_get_uvd_level(adev, - pi->uvd_dpm.soft_max_clk, - PPSMC_MSG_SetUvdSoftMax)); - if (ret) - return ret; - - DRM_DEBUG("DPM uvd unforce state min=%d, max=%d.\n", - pi->uvd_dpm.soft_min_clk, - pi->uvd_dpm.soft_max_clk); - - return 0; -} - -static int cz_dpm_vce_force_highest(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (pi->vce_dpm.soft_min_clk != pi->vce_dpm.soft_max_clk) { - pi->vce_dpm.soft_min_clk = - pi->vce_dpm.soft_max_clk; - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetEclkSoftMin, - cz_get_eclk_level(adev, - pi->vce_dpm.soft_min_clk, - PPSMC_MSG_SetEclkSoftMin)); - if (ret) - return ret; - } - - return ret; -} - -static int cz_dpm_vce_force_lowest(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (pi->vce_dpm.soft_max_clk != pi->vce_dpm.soft_min_clk) { - pi->vce_dpm.soft_max_clk = pi->vce_dpm.soft_min_clk; - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetEclkSoftMax, - cz_get_uvd_level(adev, - pi->vce_dpm.soft_max_clk, - PPSMC_MSG_SetEclkSoftMax)); - if (ret) - return ret; - } - - return ret; -} - -static uint32_t cz_dpm_get_max_vce_level(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - if (!pi->max_vce_level) { - cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel); - pi->max_vce_level = cz_get_argument(adev) + 1; - } - - if (pi->max_vce_level > CZ_MAX_HARDWARE_POWERLEVELS) { - DRM_ERROR("Invalid max vce level!\n"); - return -EINVAL; - } - - return pi->max_vce_level; -} - -static int cz_dpm_unforce_vce_dpm_levels(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_vce_clock_voltage_dependency_table *dep_table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - uint32_t level = 0; - int ret = 0; - - pi->vce_dpm.soft_min_clk = dep_table->entries[0].ecclk; - level = cz_dpm_get_max_vce_level(adev) - 1; - if (level < dep_table->count) - pi->vce_dpm.soft_max_clk = dep_table->entries[level].ecclk; - else - pi->vce_dpm.soft_max_clk = - dep_table->entries[dep_table->count - 1].ecclk; - - /* get min/max sclk soft value - * notify SMU to execute */ - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetEclkSoftMin, - cz_get_eclk_level(adev, - pi->vce_dpm.soft_min_clk, - PPSMC_MSG_SetEclkSoftMin)); - if (ret) - return ret; - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetEclkSoftMax, - 
cz_get_eclk_level(adev, - pi->vce_dpm.soft_max_clk, - PPSMC_MSG_SetEclkSoftMax)); - if (ret) - return ret; - - DRM_DEBUG("DPM vce unforce state min=%d, max=%d.\n", - pi->vce_dpm.soft_min_clk, - pi->vce_dpm.soft_max_clk); - - return 0; -} - -static int cz_dpm_force_dpm_level(struct amdgpu_device *adev, - enum amdgpu_dpm_forced_level level) -{ - int ret = 0; - - switch (level) { - case AMDGPU_DPM_FORCED_LEVEL_HIGH: - /* sclk */ - ret = cz_dpm_unforce_dpm_levels(adev); - if (ret) - return ret; - ret = cz_dpm_force_highest(adev); - if (ret) - return ret; - - /* uvd */ - ret = cz_dpm_unforce_uvd_dpm_levels(adev); - if (ret) - return ret; - ret = cz_dpm_uvd_force_highest(adev); - if (ret) - return ret; - - /* vce */ - ret = cz_dpm_unforce_vce_dpm_levels(adev); - if (ret) - return ret; - ret = cz_dpm_vce_force_highest(adev); - if (ret) - return ret; - break; - case AMDGPU_DPM_FORCED_LEVEL_LOW: - /* sclk */ - ret = cz_dpm_unforce_dpm_levels(adev); - if (ret) - return ret; - ret = cz_dpm_force_lowest(adev); - if (ret) - return ret; - - /* uvd */ - ret = cz_dpm_unforce_uvd_dpm_levels(adev); - if (ret) - return ret; - ret = cz_dpm_uvd_force_lowest(adev); - if (ret) - return ret; - - /* vce */ - ret = cz_dpm_unforce_vce_dpm_levels(adev); - if (ret) - return ret; - ret = cz_dpm_vce_force_lowest(adev); - if (ret) - return ret; - break; - case AMDGPU_DPM_FORCED_LEVEL_AUTO: - /* sclk */ - ret = cz_dpm_unforce_dpm_levels(adev); - if (ret) - return ret; - - /* uvd */ - ret = cz_dpm_unforce_uvd_dpm_levels(adev); - if (ret) - return ret; - - /* vce */ - ret = cz_dpm_unforce_vce_dpm_levels(adev); - if (ret) - return ret; - break; - default: - break; - } - - adev->pm.dpm.forced_level = level; - - return ret; -} - -/* fix me, display configuration change lists here - * mostly dal related*/ -static void cz_dpm_display_configuration_changed(struct amdgpu_device *adev) -{ -} - -static uint32_t cz_dpm_get_sclk(struct amdgpu_device *adev, bool low) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct cz_ps *requested_state = cz_get_ps(&pi->requested_rps); - - if (low) - return requested_state->levels[0].sclk; - else - return requested_state->levels[requested_state->num_levels - 1].sclk; - -} - -static uint32_t cz_dpm_get_mclk(struct amdgpu_device *adev, bool low) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - return pi->sys_info.bootup_uma_clk; -} - -static int cz_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (enable && pi->caps_uvd_dpm ) { - pi->dpm_flags |= DPMFlags_UVD_Enabled; - DRM_DEBUG("UVD DPM Enabled.\n"); - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_EnableAllSmuFeatures, UVD_DPM_MASK); - } else { - pi->dpm_flags &= ~DPMFlags_UVD_Enabled; - DRM_DEBUG("UVD DPM Stopped\n"); - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_DisableAllSmuFeatures, UVD_DPM_MASK); - } - - return ret; -} - -static int cz_update_uvd_dpm(struct amdgpu_device *adev, bool gate) -{ - return cz_enable_uvd_dpm(adev, !gate); -} - - -static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret; - - if (pi->uvd_power_gated == gate) - return; - - pi->uvd_power_gated = gate; - - if (gate) { - if (pi->caps_uvd_pg) { - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_GATE); - if (ret) { - DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n"); - return; - } - - /* shutdown the UVD block */ - ret = 
amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); - - if (ret) { - DRM_ERROR("UVD DPM Power Gating failed to set powergating state\n"); - return; - } - } - cz_update_uvd_dpm(adev, gate); - if (pi->caps_uvd_pg) { - /* power off the UVD block */ - ret = cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF); - if (ret) { - DRM_ERROR("UVD DPM Power Gating failed to send SMU PowerOFF message\n"); - return; - } - } - } else { - if (pi->caps_uvd_pg) { - /* power on the UVD block */ - if (pi->uvd_dynamic_pg) - ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1); - else - ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0); - - if (ret) { - DRM_ERROR("UVD DPM Power Gating Failed to send SMU PowerON message\n"); - return; - } - - /* re-init the UVD block */ - ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); - - if (ret) { - DRM_ERROR("UVD DPM Power Gating Failed to set powergating state\n"); - return; - } - - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); - if (ret) { - DRM_ERROR("UVD DPM Power Gating Failed to set clockgating state\n"); - return; - } - } - cz_update_uvd_dpm(adev, gate); - } -} - -static int cz_enable_vce_dpm(struct amdgpu_device *adev, bool enable) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (enable && pi->caps_vce_dpm) { - pi->dpm_flags |= DPMFlags_VCE_Enabled; - DRM_DEBUG("VCE DPM Enabled.\n"); - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_EnableAllSmuFeatures, VCE_DPM_MASK); - - } else { - pi->dpm_flags &= ~DPMFlags_VCE_Enabled; - DRM_DEBUG("VCE DPM Stopped\n"); - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_DisableAllSmuFeatures, VCE_DPM_MASK); - } - - return ret; -} - -static int cz_update_vce_dpm(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_vce_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - - /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */ - if (pi->caps_stable_power_state) { - pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk; - } else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */ - /* leave it as set by user */ - /*pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;*/ - } - - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetEclkHardMin, - cz_get_eclk_level(adev, - pi->vce_dpm.hard_min_clk, - PPSMC_MSG_SetEclkHardMin)); - return 0; -} - -static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - if (pi->caps_vce_pg) { - if (pi->vce_power_gated != gate) { - if (gate) { - /* disable clockgating so we can properly shut down the block */ - amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); - /* shutdown the VCE block */ - amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - - cz_enable_vce_dpm(adev, false); - cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); - pi->vce_power_gated = true; - } else { - cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON); - pi->vce_power_gated = false; - - /* re-init the VCE block */ - amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); - /* enable clockgating. 
hw will dynamically gate/ungate clocks on the fly */ - amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_GATE); - - cz_update_vce_dpm(adev); - cz_enable_vce_dpm(adev, true); - } - } else { - if (! pi->vce_power_gated) { - cz_update_vce_dpm(adev); - } - } - } else { /*pi->caps_vce_pg*/ - pi->vce_power_gated = gate; - cz_update_vce_dpm(adev); - cz_enable_vce_dpm(adev, !gate); - } -} - -static int cz_check_state_equal(struct amdgpu_device *adev, - struct amdgpu_ps *cps, - struct amdgpu_ps *rps, - bool *equal) -{ - if (equal == NULL) - return -EINVAL; - - *equal = false; - return 0; -} - -const struct amd_ip_funcs cz_dpm_ip_funcs = { - .name = "cz_dpm", - .early_init = cz_dpm_early_init, - .late_init = cz_dpm_late_init, - .sw_init = cz_dpm_sw_init, - .sw_fini = cz_dpm_sw_fini, - .hw_init = cz_dpm_hw_init, - .hw_fini = cz_dpm_hw_fini, - .suspend = cz_dpm_suspend, - .resume = cz_dpm_resume, - .is_idle = NULL, - .wait_for_idle = NULL, - .soft_reset = NULL, - .set_clockgating_state = cz_dpm_set_clockgating_state, - .set_powergating_state = cz_dpm_set_powergating_state, -}; - -static const struct amdgpu_dpm_funcs cz_dpm_funcs = { - .get_temperature = cz_dpm_get_temperature, - .pre_set_power_state = cz_dpm_pre_set_power_state, - .set_power_state = cz_dpm_set_power_state, - .post_set_power_state = cz_dpm_post_set_power_state, - .display_configuration_changed = cz_dpm_display_configuration_changed, - .get_sclk = cz_dpm_get_sclk, - .get_mclk = cz_dpm_get_mclk, - .print_power_state = cz_dpm_print_power_state, - .debugfs_print_current_performance_level = - cz_dpm_debugfs_print_current_performance_level, - .force_performance_level = cz_dpm_force_dpm_level, - .vblank_too_short = NULL, - .powergate_uvd = cz_dpm_powergate_uvd, - .powergate_vce = cz_dpm_powergate_vce, - .check_state_equal = cz_check_state_equal, -}; - -static void cz_dpm_set_funcs(struct amdgpu_device *adev) -{ - if (NULL == adev->pm.funcs) - adev->pm.funcs = &cz_dpm_funcs; -} - -const struct amdgpu_ip_block_version cz_dpm_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_SMC, - .major = 8, - .minor = 0, - .rev = 0, - .funcs = &cz_dpm_ip_funcs, -}; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h deleted file mode 100644 index 5df8c1faab51..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef __CZ_DPM_H__ -#define __CZ_DPM_H__ - -#include "smu8_fusion.h" - -#define CZ_AT_DFLT 30 -#define CZ_NUM_NBPSTATES 4 -#define CZ_NUM_NBPMEMORY_CLOCK 2 -#define CZ_MAX_HARDWARE_POWERLEVELS 8 -#define CZ_MAX_DISPLAY_CLOCK_LEVEL 8 -#define CZ_MAX_DISPLAYPHY_IDS 10 - -#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 - -#define SMC_RAM_END 0x40000 - -#define DPMFlags_SCLK_Enabled 0x00000001 -#define DPMFlags_UVD_Enabled 0x00000002 -#define DPMFlags_VCE_Enabled 0x00000004 -#define DPMFlags_ACP_Enabled 0x00000008 -#define DPMFlags_ForceHighestValid 0x40000000 -#define DPMFlags_Debug 0x80000000 - -/* Do not change the following, it is also defined in SMU8.h */ -#define SMU_EnabledFeatureScoreboard_AcpDpmOn 0x00000001 -#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000 -#define SMU_EnabledFeatureScoreboard_UvdDpmOn 0x00800000 -#define SMU_EnabledFeatureScoreboard_VceDpmOn 0x01000000 - -/* temporary solution to SetMinDeepSleepSclk - * should indicate by display adaptor - * 10k Hz unit*/ -#define CZ_MIN_DEEP_SLEEP_SCLK 800 - -enum cz_pt_config_reg_type { - CZ_CONFIGREG_MMR = 0, - CZ_CONFIGREG_SMC_IND, - CZ_CONFIGREG_DIDT_IND, - CZ_CONFIGREG_CACHE, - CZ_CONFIGREG_MAX -}; - -struct cz_pt_config_reg { - uint32_t offset; - uint32_t mask; - uint32_t shift; - uint32_t value; - enum cz_pt_config_reg_type type; -}; - -struct cz_dpm_entry { - uint32_t soft_min_clk; - uint32_t hard_min_clk; - uint32_t soft_max_clk; - uint32_t hard_max_clk; -}; - -struct cz_pl { - uint32_t sclk; - uint8_t vddc_index; - uint8_t ds_divider_index; - uint8_t ss_divider_index; - uint8_t allow_gnb_slow; - uint8_t force_nbp_state; - uint8_t display_wm; - uint8_t vce_wm; -}; - -struct cz_ps { - struct cz_pl levels[CZ_MAX_HARDWARE_POWERLEVELS]; - uint32_t num_levels; - bool need_dfs_bypass; - uint8_t dpm0_pg_nb_ps_lo; - uint8_t dpm0_pg_nb_ps_hi; - uint8_t dpmx_nb_ps_lo; - uint8_t dpmx_nb_ps_hi; - bool force_high; -}; - -struct cz_displayphy_entry { - uint8_t phy_present; - uint8_t active_lane_mapping; - uint8_t display_conf_type; - uint8_t num_active_lanes; -}; - -struct cz_displayphy_info { - bool phy_access_initialized; - struct cz_displayphy_entry entries[CZ_MAX_DISPLAYPHY_IDS]; -}; - -struct cz_sys_info { - uint32_t bootup_uma_clk; - uint32_t bootup_sclk; - uint32_t dentist_vco_freq; - uint32_t nb_dpm_enable; - uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK]; - uint32_t nbp_n_clock[CZ_NUM_NBPSTATES]; - uint8_t nbp_voltage_index[CZ_NUM_NBPSTATES]; - uint32_t display_clock[CZ_MAX_DISPLAY_CLOCK_LEVEL]; - uint16_t bootup_nb_voltage_index; - uint8_t htc_tmp_lmt; - uint8_t htc_hyst_lmt; - uint32_t uma_channel_number; -}; - -struct cz_power_info { - uint32_t active_target[CZ_MAX_HARDWARE_POWERLEVELS]; - struct cz_sys_info sys_info; - struct cz_pl boot_pl; - bool disable_nb_ps3_in_battery; - bool battery_state; - uint32_t lowest_valid; - uint32_t highest_valid; - uint16_t high_voltage_threshold; - /* smc offsets */ - uint32_t sram_end; - uint32_t dpm_table_start; - uint32_t soft_regs_start; - /* dpm SMU tables */ - uint8_t uvd_level_count; - uint8_t vce_level_count; - uint8_t acp_level_count; - uint32_t fps_high_threshold; - uint32_t fps_low_threshold; - /* dpm table */ - uint32_t dpm_flags; - struct cz_dpm_entry sclk_dpm; - struct cz_dpm_entry uvd_dpm; - struct cz_dpm_entry vce_dpm; - struct cz_dpm_entry acp_dpm; - - uint8_t uvd_boot_level; - uint8_t uvd_interval; - uint8_t vce_boot_level; - uint8_t vce_interval; - uint8_t acp_boot_level; - uint8_t acp_interval; - - uint8_t graphics_boot_level; - uint8_t 
graphics_interval; - uint8_t graphics_therm_throttle_enable; - uint8_t graphics_voltage_change_enable; - uint8_t graphics_clk_slow_enable; - uint8_t graphics_clk_slow_divider; - - uint32_t low_sclk_interrupt_threshold; - bool uvd_power_gated; - bool vce_power_gated; - bool acp_power_gated; - - uint32_t active_process_mask; - - uint32_t mgcg_cgtt_local0; - uint32_t mgcg_cgtt_local1; - uint32_t clock_slow_down_step; - uint32_t skip_clock_slow_down; - bool enable_nb_ps_policy; - uint32_t voting_clients; - uint32_t voltage_drop_threshold; - uint32_t gfx_pg_threshold; - uint32_t max_sclk_level; - uint32_t max_uvd_level; - uint32_t max_vce_level; - /* flags */ - bool didt_enabled; - bool video_start; - bool cac_enabled; - bool bapm_enabled; - bool nb_dpm_enabled_by_driver; - bool nb_dpm_enabled; - bool auto_thermal_throttling_enabled; - bool dpm_enabled; - bool need_pptable_upload; - /* caps */ - bool caps_cac; - bool caps_power_containment; - bool caps_sq_ramping; - bool caps_db_ramping; - bool caps_td_ramping; - bool caps_tcp_ramping; - bool caps_sclk_throttle_low_notification; - bool caps_fps; - bool caps_uvd_dpm; - bool caps_uvd_pg; - bool caps_vce_dpm; - bool caps_vce_pg; - bool caps_acp_dpm; - bool caps_acp_pg; - bool caps_stable_power_state; - bool caps_enable_dfs_bypass; - bool caps_sclk_ds; - bool caps_voltage_island; - /* power state */ - struct amdgpu_ps current_rps; - struct cz_ps current_ps; - struct amdgpu_ps requested_rps; - struct cz_ps requested_ps; - - bool uvd_power_down; - bool vce_power_down; - bool acp_power_down; - - bool uvd_dynamic_pg; -}; - -/* cz_smc.c */ -uint32_t cz_get_argument(struct amdgpu_device *adev); -int cz_send_msg_to_smc(struct amdgpu_device *adev, uint16_t msg); -int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - uint16_t msg, uint32_t parameter); -int cz_read_smc_sram_dword(struct amdgpu_device *adev, - uint32_t smc_address, uint32_t *value, uint32_t limit); -int cz_smu_upload_pptable(struct amdgpu_device *adev); -int cz_smu_download_pptable(struct amdgpu_device *adev, void **table); -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c deleted file mode 100644 index aed7033c0973..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c +++ /dev/null @@ -1,995 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include -#include "drmP.h" -#include "amdgpu.h" -#include "smu8.h" -#include "smu8_fusion.h" -#include "cz_ppsmc.h" -#include "cz_smumgr.h" -#include "smu_ucode_xfer_cz.h" -#include "amdgpu_ucode.h" -#include "cz_dpm.h" -#include "vi_dpm.h" - -#include "smu/smu_8_0_d.h" -#include "smu/smu_8_0_sh_mask.h" -#include "gca/gfx_8_0_d.h" -#include "gca/gfx_8_0_sh_mask.h" - -uint32_t cz_get_argument(struct amdgpu_device *adev) -{ - return RREG32(mmSMU_MP1_SRBM2P_ARG_0); -} - -static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = - (struct cz_smu_private_data *)(adev->smu.priv); - - return priv; -} - -static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg) -{ - int i; - u32 content = 0, tmp; - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0), - SMU_MP1_SRBM2P_RESP_0, CONTENT); - if (content != tmp) - break; - udelay(1); - } - - /* timeout means wrong logic*/ - if (i == adev->usec_timeout) - return -EINVAL; - - WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0); - WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg); - - return 0; -} - -int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg) -{ - int i; - u32 content = 0, tmp = 0; - - if (cz_send_msg_to_smc_async(adev, msg)) - return -EINVAL; - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0), - SMU_MP1_SRBM2P_RESP_0, CONTENT); - if (content != tmp) - break; - udelay(1); - } - - /* timeout means wrong logic*/ - if (i == adev->usec_timeout) - return -EINVAL; - - if (PPSMC_Result_OK != tmp) { - dev_err(adev->dev, "SMC Failed to send Message.\n"); - return -EINVAL; - } - - return 0; -} - -int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - u16 msg, u32 parameter) -{ - WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter); - return cz_send_msg_to_smc(adev, msg); -} - -static int cz_set_smc_sram_address(struct amdgpu_device *adev, - u32 smc_address, u32 limit) -{ - if (smc_address & 3) - return -EINVAL; - if ((smc_address + 3) > limit) - return -EINVAL; - - WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address); - - return 0; -} - -int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 *value, u32 limit) -{ - int ret; - - ret = cz_set_smc_sram_address(adev, smc_address, limit); - if (ret) - return ret; - - *value = RREG32(mmMP0PUB_IND_DATA_0); - - return 0; -} - -static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 value, u32 limit) -{ - int ret; - - ret = cz_set_smc_sram_address(adev, smc_address, limit); - if (ret) - return ret; - - WREG32(mmMP0PUB_IND_DATA_0, value); - - return 0; -} - -static int cz_smu_request_load_fw(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - - uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION + - offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); - - cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4); - - /*prepare toc buffers*/ - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_DriverDramAddrHi, - priv->toc_buffer.mc_addr_high); - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_DriverDramAddrLo, - priv->toc_buffer.mc_addr_low); - cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs); - - /*execute jobs*/ - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_ExecuteJob, - priv->toc_entry_aram); - - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_ExecuteJob, - priv->toc_entry_power_profiling_index); - - 
cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_ExecuteJob, - priv->toc_entry_initialize_index); - - return 0; -} - -/* - *Check if the FW has been loaded, SMU will not return if loading - *has not finished. - */ -static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev, - uint32_t fw_mask) -{ - int i; - uint32_t index = SMN_MP1_SRAM_START_ADDR + - SMU8_FIRMWARE_HEADER_LOCATION + - offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); - - WREG32(mmMP0PUB_IND_INDEX, index); - - for (i = 0; i < adev->usec_timeout; i++) { - if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask)) - break; - udelay(1); - } - - if (i >= adev->usec_timeout) { - dev_err(adev->dev, - "SMU check loaded firmware failed, expecting 0x%x, getting 0x%x", - fw_mask, RREG32(mmMP0PUB_IND_DATA)); - return -EINVAL; - } - - return 0; -} - -/* - * interfaces for different ip blocks to check firmware loading status - * 0 for success otherwise failed - */ -static int cz_smu_check_finished(struct amdgpu_device *adev, - enum AMDGPU_UCODE_ID id) -{ - switch (id) { - case AMDGPU_UCODE_ID_SDMA0: - if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED) - return 0; - break; - case AMDGPU_UCODE_ID_SDMA1: - if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED) - return 0; - break; - case AMDGPU_UCODE_ID_CP_CE: - if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED) - return 0; - break; - case AMDGPU_UCODE_ID_CP_PFP: - if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED) - return 0; - case AMDGPU_UCODE_ID_CP_ME: - if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED) - return 0; - break; - case AMDGPU_UCODE_ID_CP_MEC1: - if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED) - return 0; - break; - case AMDGPU_UCODE_ID_CP_MEC2: - if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED) - return 0; - break; - case AMDGPU_UCODE_ID_RLC_G: - if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED) - return 0; - break; - case AMDGPU_UCODE_ID_MAXIMUM: - default: - break; - } - - return 1; -} - -static int cz_load_mec_firmware(struct amdgpu_device *adev) -{ - struct amdgpu_firmware_info *ucode = - &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1]; - uint32_t reg_data; - uint32_t tmp; - - if (ucode->fw == NULL) - return -EINVAL; - - /* Disable MEC parsing/prefetching */ - tmp = RREG32(mmCP_MEC_CNTL); - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); - WREG32(mmCP_MEC_CNTL, tmp); - - tmp = RREG32(mmCP_CPC_IC_BASE_CNTL); - tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); - tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0); - tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); - tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1); - WREG32(mmCP_CPC_IC_BASE_CNTL, tmp); - - reg_data = lower_32_bits(ucode->mc_addr) & - REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO); - WREG32(mmCP_CPC_IC_BASE_LO, reg_data); - - reg_data = upper_32_bits(ucode->mc_addr) & - REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI); - WREG32(mmCP_CPC_IC_BASE_HI, reg_data); - - return 0; -} - -int cz_smu_start(struct amdgpu_device *adev) -{ - int ret = 0; - - uint32_t fw_to_check = UCODE_ID_RLC_G_MASK | - UCODE_ID_SDMA0_MASK | - UCODE_ID_SDMA1_MASK | - UCODE_ID_CP_CE_MASK | - UCODE_ID_CP_ME_MASK | - UCODE_ID_CP_PFP_MASK | - UCODE_ID_CP_MEC_JT1_MASK | - UCODE_ID_CP_MEC_JT2_MASK; - - if (adev->asic_type == CHIP_STONEY) - fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK); - - cz_smu_request_load_fw(adev); - ret = cz_smu_check_fw_load_finish(adev, fw_to_check); - if (ret) - return ret; - - /* 
manually load MEC firmware for CZ */ - if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) { - ret = cz_load_mec_firmware(adev); - if (ret) { - dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret); - return ret; - } - } - - /* setup fw load flag */ - adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED | - AMDGPU_SDMA1_UCODE_LOADED | - AMDGPU_CPCE_UCODE_LOADED | - AMDGPU_CPPFP_UCODE_LOADED | - AMDGPU_CPME_UCODE_LOADED | - AMDGPU_CPMEC1_UCODE_LOADED | - AMDGPU_CPMEC2_UCODE_LOADED | - AMDGPU_CPRLC_UCODE_LOADED; - - if (adev->asic_type == CHIP_STONEY) - adev->smu.fw_flags &= ~(AMDGPU_SDMA1_UCODE_LOADED | AMDGPU_CPMEC2_UCODE_LOADED); - - return ret; -} - -static uint32_t cz_convert_fw_type(uint32_t fw_type) -{ - enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM; - - switch (fw_type) { - case UCODE_ID_SDMA0: - result = AMDGPU_UCODE_ID_SDMA0; - break; - case UCODE_ID_SDMA1: - result = AMDGPU_UCODE_ID_SDMA1; - break; - case UCODE_ID_CP_CE: - result = AMDGPU_UCODE_ID_CP_CE; - break; - case UCODE_ID_CP_PFP: - result = AMDGPU_UCODE_ID_CP_PFP; - break; - case UCODE_ID_CP_ME: - result = AMDGPU_UCODE_ID_CP_ME; - break; - case UCODE_ID_CP_MEC_JT1: - case UCODE_ID_CP_MEC_JT2: - result = AMDGPU_UCODE_ID_CP_MEC1; - break; - case UCODE_ID_RLC_G: - result = AMDGPU_UCODE_ID_RLC_G; - break; - default: - DRM_ERROR("UCode type is out of range!"); - } - - return result; -} - -static uint8_t cz_smu_translate_firmware_enum_to_arg( - enum cz_scratch_entry firmware_enum) -{ - uint8_t ret = 0; - - switch (firmware_enum) { - case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0: - ret = UCODE_ID_SDMA0; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1: - ret = UCODE_ID_SDMA1; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE: - ret = UCODE_ID_CP_CE; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP: - ret = UCODE_ID_CP_PFP; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME: - ret = UCODE_ID_CP_ME; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1: - ret = UCODE_ID_CP_MEC_JT1; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2: - ret = UCODE_ID_CP_MEC_JT2; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG: - ret = UCODE_ID_GMCON_RENG; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G: - ret = UCODE_ID_RLC_G; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH: - ret = UCODE_ID_RLC_SCRATCH; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM: - ret = UCODE_ID_RLC_SRM_ARAM; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM: - ret = UCODE_ID_RLC_SRM_DRAM; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM: - ret = UCODE_ID_DMCU_ERAM; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM: - ret = UCODE_ID_DMCU_IRAM; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING: - ret = TASK_ARG_INIT_MM_PWR_LOG; - break; - case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT: - case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING: - case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS: - case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT: - case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START: - case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS: - ret = TASK_ARG_REG_MMIO; - break; - case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE: - ret = TASK_ARG_INIT_CLK_TABLE; - break; - } - - return ret; -} - -static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev, - enum cz_scratch_entry firmware_enum, - struct cz_buffer_entry *entry) -{ - uint64_t gpu_addr; - uint32_t data_size; - uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum); - enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id); - struct amdgpu_firmware_info *ucode = 
&adev->firmware.ucode[id]; - const struct gfx_firmware_header_v1_0 *header; - - if (ucode->fw == NULL) - return -EINVAL; - - gpu_addr = ucode->mc_addr; - header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; - data_size = le32_to_cpu(header->header.ucode_size_bytes); - - if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) || - (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) { - gpu_addr += le32_to_cpu(header->jt_offset) << 2; - data_size = le32_to_cpu(header->jt_size) << 2; - } - - entry->mc_addr_low = lower_32_bits(gpu_addr); - entry->mc_addr_high = upper_32_bits(gpu_addr); - entry->data_size = data_size; - entry->firmware_ID = firmware_enum; - - return 0; -} - -static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev, - enum cz_scratch_entry scratch_type, - uint32_t size_in_byte, - struct cz_buffer_entry *entry) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) | - priv->smu_buffer.mc_addr_low; - mc_addr += size_in_byte; - - priv->smu_buffer_used_bytes += size_in_byte; - entry->data_size = size_in_byte; - entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes; - entry->mc_addr_low = lower_32_bits(mc_addr); - entry->mc_addr_high = upper_32_bits(mc_addr); - entry->firmware_ID = scratch_type; - - return 0; -} - -static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev, - enum cz_scratch_entry firmware_enum, - bool is_last) -{ - uint8_t i; - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; - struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++]; - - task->type = TASK_TYPE_UCODE_LOAD; - task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum); - task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count; - - for (i = 0; i < priv->driver_buffer_length; i++) - if (priv->driver_buffer[i].firmware_ID == firmware_enum) - break; - - if (i >= priv->driver_buffer_length) { - dev_err(adev->dev, "Invalid Firmware Type\n"); - return -EINVAL; - } - - task->addr.low = priv->driver_buffer[i].mc_addr_low; - task->addr.high = priv->driver_buffer[i].mc_addr_high; - task->size_bytes = priv->driver_buffer[i].data_size; - - return 0; -} - -static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev, - enum cz_scratch_entry firmware_enum, - uint8_t type, bool is_last) -{ - uint8_t i; - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; - struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++]; - - task->type = type; - task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum); - task->next = is_last ? 
END_OF_TASK_LIST : priv->toc_entry_used_count; - - for (i = 0; i < priv->scratch_buffer_length; i++) - if (priv->scratch_buffer[i].firmware_ID == firmware_enum) - break; - - if (i >= priv->scratch_buffer_length) { - dev_err(adev->dev, "Invalid Firmware Type\n"); - return -EINVAL; - } - - task->addr.low = priv->scratch_buffer[i].mc_addr_low; - task->addr.high = priv->scratch_buffer[i].mc_addr_high; - task->size_bytes = priv->scratch_buffer[i].data_size; - - if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == firmware_enum) { - struct cz_ih_meta_data *pIHReg_restore = - (struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr; - pIHReg_restore->command = - METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD; - } - - return 0; -} - -static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - priv->toc_entry_aram = priv->toc_entry_used_count; - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, - TASK_TYPE_UCODE_SAVE, true); - - return 0; -} - -static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; - - toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count; - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, - TASK_TYPE_UCODE_SAVE, false); - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, - TASK_TYPE_UCODE_SAVE, true); - - return 0; -} - -static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; - - toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count; - - /* populate ucode */ - if (adev->firmware.smu_load) { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - if (adev->asic_type == CHIP_STONEY) { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - } else { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); - } - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false); - } - - /* populate scratch */ - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, - TASK_TYPE_UCODE_LOAD, false); - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, - TASK_TYPE_UCODE_LOAD, false); - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, - TASK_TYPE_UCODE_LOAD, true); - - return 0; -} - -static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - - priv->toc_entry_power_profiling_index = priv->toc_entry_used_count; - - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, - TASK_TYPE_INITIALIZE, true); - return 0; -} - -static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - - priv->toc_entry_initialize_index = 
priv->toc_entry_used_count; - - if (adev->firmware.smu_load) { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); - if (adev->asic_type == CHIP_STONEY) { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); - } else { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); - } - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - if (adev->asic_type == CHIP_STONEY) { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - } else { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); - } - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true); - } - - return 0; -} - -static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - - priv->toc_entry_clock_table = priv->toc_entry_used_count; - - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, - TASK_TYPE_INITIALIZE, true); - - return 0; -} - -static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev) -{ - int i; - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; - - for (i = 0; i < NUM_JOBLIST_ENTRIES; i++) - toc->JobList[i] = (uint8_t)IGNORE_JOB; - - return 0; -} - -/* - * cz smu uninitialization - */ -int cz_smu_fini(struct amdgpu_device *adev) -{ - amdgpu_bo_unref(&adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - kfree(adev->smu.priv); - adev->smu.priv = NULL; - if (adev->firmware.smu_load) - amdgpu_ucode_fini_bo(adev); - - return 0; -} - -int cz_smu_download_pptable(struct amdgpu_device *adev, void **table) -{ - uint8_t i; - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - - for (i = 0; i < priv->scratch_buffer_length; i++) - if (priv->scratch_buffer[i].firmware_ID == - CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE) - break; - - if (i >= priv->scratch_buffer_length) { - dev_err(adev->dev, "Invalid Scratch Type\n"); - return -EINVAL; - } - - *table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr; - - /* prepare buffer for pptable */ - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetClkTableAddrHi, - priv->scratch_buffer[i].mc_addr_high); - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetClkTableAddrLo, - priv->scratch_buffer[i].mc_addr_low); - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_ExecuteJob, - priv->toc_entry_clock_table); - - /* actual downloading */ - cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram); - - return 0; -} - -int cz_smu_upload_pptable(struct amdgpu_device *adev) -{ - uint8_t i; - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - - for (i = 0; i < priv->scratch_buffer_length; i++) - if (priv->scratch_buffer[i].firmware_ID == - CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE) - break; - - if (i >= priv->scratch_buffer_length) { - dev_err(adev->dev, "Invalid Scratch Type\n"); - return -EINVAL; - } - - /* prepare SMU */ - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetClkTableAddrHi, - priv->scratch_buffer[i].mc_addr_high); - 
cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetClkTableAddrLo, - priv->scratch_buffer[i].mc_addr_low); - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_ExecuteJob, - priv->toc_entry_clock_table); - - /* actual uploading */ - cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu); - - return 0; -} - -/* - * cz smumgr functions initialization - */ -static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = { - .check_fw_load_finish = cz_smu_check_finished, - .request_smu_load_fw = NULL, - .request_smu_specific_fw = NULL, -}; - -/* - * cz smu initialization - */ -int cz_smu_init(struct amdgpu_device *adev) -{ - int ret = -EINVAL; - uint64_t mc_addr = 0; - struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; - struct amdgpu_bo **smu_buf = &adev->smu.smu_buf; - void *toc_buf_ptr = NULL; - void *smu_buf_ptr = NULL; - - struct cz_smu_private_data *priv = - kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL); - if (priv == NULL) - return -ENOMEM; - - /* allocate firmware buffers */ - if (adev->firmware.smu_load) - amdgpu_ucode_init_bo(adev); - - adev->smu.priv = priv; - adev->smu.fw_flags = 0; - priv->toc_buffer.data_size = 4096; - - priv->smu_buffer.data_size = - ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) + - ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) + - ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) + - ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) + - ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32); - - /* prepare toc buffer and smu buffer: - * 1. create amdgpu_bo for toc buffer and smu buffer - * 2. pin mc address - * 3. map kernel virtual address - */ - ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - toc_buf); - - if (ret) { - dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret); - return ret; - } - - ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - smu_buf); - - if (ret) { - dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret); - return ret; - } - - /* toc buffer reserve/pin/map */ - ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); - if (ret) { - amdgpu_bo_unref(&adev->smu.toc_buf); - dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret); - return ret; - } - - ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret); - return ret; - } - - ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); - if (ret) - goto smu_init_failed; - - amdgpu_bo_unreserve(adev->smu.toc_buf); - - priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr); - priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr); - priv->toc_buffer.kaddr = toc_buf_ptr; - - /* smu buffer reserve/pin/map */ - ret = amdgpu_bo_reserve(adev->smu.smu_buf, false); - if (ret) { - amdgpu_bo_unref(&adev->smu.smu_buf); - dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret); - return ret; - } - - ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret); - return ret; - } - - ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr); - if (ret) - goto smu_init_failed; - - amdgpu_bo_unreserve(adev->smu.smu_buf); - - priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr); - priv->smu_buffer.mc_addr_high = 
upper_32_bits(mc_addr); - priv->smu_buffer.kaddr = smu_buf_ptr; - - if (adev->firmware.smu_load) { - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - - if (adev->asic_type == CHIP_STONEY) { - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - } else { - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - } - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - if (adev->asic_type == CHIP_STONEY) { - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - } else { - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - } - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - } - - if (cz_smu_populate_single_scratch_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, - UCODE_ID_RLC_SCRATCH_SIZE_BYTE, - &priv->scratch_buffer[priv->scratch_buffer_length++])) - goto smu_init_failed; - if (cz_smu_populate_single_scratch_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, - UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, - &priv->scratch_buffer[priv->scratch_buffer_length++])) - goto smu_init_failed; - if (cz_smu_populate_single_scratch_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, - UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, - &priv->scratch_buffer[priv->scratch_buffer_length++])) - goto smu_init_failed; - if (cz_smu_populate_single_scratch_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, - sizeof(struct SMU8_MultimediaPowerLogData), - &priv->scratch_buffer[priv->scratch_buffer_length++])) - goto smu_init_failed; - if (cz_smu_populate_single_scratch_entry(adev, - CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, - sizeof(struct SMU8_Fusion_ClkTable), - &priv->scratch_buffer[priv->scratch_buffer_length++])) - goto smu_init_failed; - - cz_smu_initialize_toc_empty_job_list(adev); - cz_smu_construct_toc_for_rlc_aram_save(adev); - cz_smu_construct_toc_for_vddgfx_enter(adev); - cz_smu_construct_toc_for_vddgfx_exit(adev); - cz_smu_construct_toc_for_power_profiling(adev); - cz_smu_construct_toc_for_bootup(adev); - cz_smu_construct_toc_for_clock_table(adev); - /* init the smumgr functions */ - adev->smu.smumgr_funcs = &cz_smumgr_funcs; - - return 0; - -smu_init_failed: - amdgpu_bo_unref(toc_buf); - amdgpu_bo_unref(smu_buf); - - return ret; -} diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h b/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h deleted file mode 100644 
index 026342fcf0f3..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __CZ_SMC_H__
-#define __CZ_SMC_H__
-
-#define MAX_NUM_FIRMWARE 8
-#define MAX_NUM_SCRATCH 11
-#define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024
-#define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048
-#define CZ_SCRATCH_SIZE_SDMA_METADATA 1024
-#define CZ_SCRATCH_SIZE_IH ((2*256+1)*4)
-
-enum cz_scratch_entry {
-	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0,
-	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
-	CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
-	CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
-	CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
-	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
-	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
-	CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG,
-	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
-	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
-	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
-	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
-	CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM,
-	CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM,
-	CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
-	CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT,
-	CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING,
-	CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS,
-	CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT,
-	CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START,
-	CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS,
-	CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
-};
-
-struct cz_buffer_entry {
-	uint32_t data_size;
-	uint32_t mc_addr_low;
-	uint32_t mc_addr_high;
-	void *kaddr;
-	enum cz_scratch_entry firmware_ID;
-};
-
-struct cz_register_index_data_pair {
-	uint32_t offset;
-	uint32_t value;
-};
-
-struct cz_ih_meta_data {
-	uint32_t command;
-	struct cz_register_index_data_pair register_index_value_pair[1];
-};
-
-struct cz_smu_private_data {
-	uint8_t driver_buffer_length;
-	uint8_t scratch_buffer_length;
-	uint16_t toc_entry_used_count;
-	uint16_t toc_entry_initialize_index;
-	uint16_t toc_entry_power_profiling_index;
-	uint16_t toc_entry_aram;
-	uint16_t toc_entry_ih_register_restore_task_index;
-	uint16_t toc_entry_clock_table;
-	uint16_t ih_register_restore_task_size;
-	uint16_t smu_buffer_used_bytes;
-
-	struct cz_buffer_entry toc_buffer;
-	struct cz_buffer_entry smu_buffer;
-	struct cz_buffer_entry driver_buffer[MAX_NUM_FIRMWARE];
-	struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH];
-};
-
-#endif
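
For reference, the core mechanism being removed here is the driver-to-SMU mailbox
handshake implemented by the deleted cz_send_msg_to_smc*() helpers; the powerplay
path carries equivalent functionality for Carrizo/Stoney going forward. Below is a
simplified sketch of that handshake, based on the code deleted above. The register
names (mmSMU_MP1_SRBM2P_*), the field macros and the RREG32/WREG32/REG_GET_FIELD
accessors all come from the removed file and its SMU 8.0 headers; the function
names in the sketch (smu8_wait_for_response, smu8_send_msg_with_arg) are
illustrative only, not part of any existing file.

/*
 * Simplified sketch, not part of this patch: the request/response protocol
 * the deleted cz_smc.c used to talk to the SMU. Assumes the amdgpu register
 * accessors and the smu_8_0 register headers that the removed file included.
 */
static int smu8_wait_for_response(struct amdgpu_device *adev)
{
	int i;
	u32 resp;

	for (i = 0; i < adev->usec_timeout; i++) {
		resp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
				     SMU_MP1_SRBM2P_RESP_0, CONTENT);
		if (resp != 0)
			return resp;	/* SMU has posted a result */
		udelay(1);
	}

	return -EINVAL;			/* timed out */
}

static int smu8_send_msg_with_arg(struct amdgpu_device *adev, u16 msg, u32 arg)
{
	/* wait for any previous transaction to complete */
	if (smu8_wait_for_response(adev) < 0)
		return -EINVAL;

	WREG32(mmSMU_MP1_SRBM2P_ARG_0, arg);	/* message argument */
	WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0);	/* clear the response */
	WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg);	/* kick the message */

	/* the SMU writes PPSMC_Result_OK to the response register on success */
	return (smu8_wait_for_response(adev) == PPSMC_Result_OK) ? 0 : -EINVAL;
}

The deleted DPM code used exactly this pattern throughout, for example sending
PPSMC_MSG_SetUvdSoftMin or PPSMC_MSG_SetEclkSoftMax with a clock-level index as
the argument, or PPSMC_MSG_ExecuteJob with a TOC entry index during firmware load.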