2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/delay.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 #include <linux/types.h>
29 #include <drm/amdgpu_drm.h>
30 #include "pp_instance.h"
32 #include "cgs_common.h"
/* SMC microcode images the driver may request at runtime, one per supported ASIC. */
34 MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
35 MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
36 MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
37 MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
38 MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
39 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
40 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
41 MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
42 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
43 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
44 MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
45 MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
/*
 * smum_early_init - allocate the SMU manager for a powerplay instance,
 * copy the device/chip identification out of @handle, and bind the
 * family-specific smumgr function table (CZ, VI, AI, RV).
 *
 * NOTE(review): gaps in the inner line numbering show this listing elides
 * lines — the kzalloc NULL check, the chip_id case labels inside the
 * nested switches, break statements, default/error paths and the final
 * return. Confirm against the full file before relying on this view.
 */
48 int smum_early_init(struct pp_instance *handle)
50 struct pp_smumgr *smumgr;
55 smumgr = kzalloc(sizeof(struct pp_smumgr), GFP_KERNEL);
59 smumgr->device = handle->device;
60 smumgr->chip_family = handle->chip_family;
61 smumgr->chip_id = handle->chip_id;
62 smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
/* Request SMU firmware (re)load on first use. */
63 smumgr->reload_fw = 1;
64 handle->smu_mgr = smumgr;
/* Select the backend function table by GPU family, then by chip id. */
66 switch (smumgr->chip_family) {
67 case AMDGPU_FAMILY_CZ:
68 smumgr->smumgr_funcs = &cz_smu_funcs;
70 case AMDGPU_FAMILY_VI:
71 switch (smumgr->chip_id) {
/* chip_id case labels (Iceland/Tonga/Fiji/Polaris) elided in this listing */
73 smumgr->smumgr_funcs = &iceland_smu_funcs;
76 smumgr->smumgr_funcs = &tonga_smu_funcs;
79 smumgr->smumgr_funcs = &fiji_smu_funcs;
84 smumgr->smumgr_funcs = &polaris10_smu_funcs;
90 case AMDGPU_FAMILY_AI:
91 switch (smumgr->chip_id) {
93 smumgr->smumgr_funcs = &vega10_smu_funcs;
99 case AMDGPU_FAMILY_RV:
100 switch (smumgr->chip_id) {
102 smumgr->smumgr_funcs = &rv_smu_funcs;
/*
 * Dispatch to the backend's thermal_avfs_enable hook when one is
 * implemented. input/output/storage/result are unused in the visible
 * lines — presumably kept for the phm table-entry signature; verify.
 */
116 int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
117 void *input, void *output, void *storage, int result)
119 if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable)
120 return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
/*
 * Dispatch to the backend's thermal_setup_fan_table hook when one is
 * implemented. input/output/storage/result are unused in the visible
 * lines — presumably kept for the phm table-entry signature; verify.
 */
125 int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
126 void *input, void *output, void *storage, int result)
128 if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table)
129 return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
/* Dispatch to the backend's update_sclk_threshold hook when implemented. */
134 int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
137 if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold)
138 return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr);
/* Update one SMC table, identified by @type, via the backend hook when implemented. */
143 int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
146 if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table)
147 return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type);
/*
 * Return the backend-specific offset of @member within the table
 * identified by @type. Note the hook takes only (type, member) — it
 * does not receive the smumgr pointer.
 */
152 uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member)
154 if (NULL != smumgr->smumgr_funcs->get_offsetof)
155 return smumgr->smumgr_funcs->get_offsetof(type, member);
/* Parse the SMU firmware header via the backend hook when implemented. */
160 int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
162 if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header)
163 return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr);
/* Read back the SMC message argument/response via the backend hook when implemented. */
167 int smum_get_argument(struct pp_smumgr *smumgr)
169 if (NULL != smumgr->smumgr_funcs->get_argument)
170 return smumgr->smumgr_funcs->get_argument(smumgr);
/*
 * Translate the generic MAC-definition id @value into the backend's
 * constant. Note the hook takes only (value), not the smumgr pointer.
 */
175 uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value)
177 if (NULL != smumgr->smumgr_funcs->get_mac_definition)
178 return smumgr->smumgr_funcs->get_mac_definition(value);
/*
 * Download the powerplay table from the SMU via the backend hook.
 * NOTE(review): the second parameter and the remaining call arguments
 * are elided from this listing (inner numbering jumps 183->186->187).
 */
183 int smum_download_powerplay_table(struct pp_smumgr *smumgr,
186 if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
187 return smumgr->smumgr_funcs->download_pptable_settings(smumgr,
/* Upload the powerplay table to the SMU via the backend hook when implemented. */
192 int smum_upload_powerplay_table(struct pp_smumgr *smumgr)
194 if (NULL != smumgr->smumgr_funcs->upload_pptable_settings)
195 return smumgr->smumgr_funcs->upload_pptable_settings(smumgr);
/*
 * Send message @msg to the SMC. Unlike the hwmgr wrappers above, this
 * one NULL-guards both the smumgr and the hook before dispatching
 * (the elided line between 202 and 205 is the error return).
 */
200 int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
202 if (smumgr == NULL || smumgr->smumgr_funcs->send_msg_to_smc == NULL)
205 return smumgr->smumgr_funcs->send_msg_to_smc(smumgr, msg);
/*
 * Send message @msg with a 32-bit @parameter to the SMC, NULL-guarding
 * the smumgr and the hook (the elided line between 212 and 214 is the
 * error return).
 */
208 int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
209 uint16_t msg, uint32_t parameter)
211 if (smumgr == NULL ||
212 smumgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
214 return smumgr->smumgr_funcs->send_msg_to_smc_with_parameter(
215 smumgr, msg, parameter);
219 * Returns once the part of the register indicated by the mask has
220 * reached the given value.
/*
 * Poll MMIO register @index until (value & mask) matches, bounded by
 * smumgr->usec_timeout iterations. NOTE(review): the per-iteration
 * delay, the loop break and the return statements are elided from this
 * listing; the middle parameter (index) is also elided (223).
 */
222 int smum_wait_on_register(struct pp_smumgr *smumgr,
224 uint32_t value, uint32_t mask)
229 if (smumgr == NULL || smumgr->device == NULL)
232 for (i = 0; i < smumgr->usec_timeout; i++) {
233 cur_value = cgs_read_register(smumgr->device, index);
234 if ((cur_value & mask) == (value & mask))
239 /* timeout means wrong logic */
240 if (i == smumgr->usec_timeout)
/*
 * Inverse of smum_wait_on_register(): poll until (value & mask) no
 * longer matches, bounded by smumgr->usec_timeout iterations.
 * NOTE(review): parameter list, NULL checks, delay, break and returns
 * are elided from this listing.
 */
246 int smum_wait_for_register_unequal(struct pp_smumgr *smumgr,
248 uint32_t value, uint32_t mask)
256 for (i = 0; i < smumgr->usec_timeout; i++) {
257 cur_value = cgs_read_register(smumgr->device,
259 if ((cur_value & mask) != (value & mask))
264 /* timeout means wrong logic */
265 if (i == smumgr->usec_timeout)
273 * Returns once the part of the register indicated by the mask
274 * has reached the given value. The indirect space is described by
275 * giving the memory-mapped index of the indirect index register.
/*
 * Indirect-space variant: write @index into the indirect index register
 * at @indirect_port, then poll the data register (indirect_port + 1)
 * via smum_wait_on_register(). Trailing call arguments are elided here.
 */
277 int smum_wait_on_indirect_register(struct pp_smumgr *smumgr,
278 uint32_t indirect_port,
283 if (smumgr == NULL || smumgr->device == NULL)
286 cgs_write_register(smumgr->device, indirect_port, index);
287 return smum_wait_on_register(smumgr, indirect_port + 1,
/*
 * Indirect-space variant of smum_wait_for_register_unequal(): select
 * @index through @indirect_port, then poll the data register
 * (indirect_port + 1). Trailing call arguments are elided here.
 */
291 void smum_wait_for_indirect_register_unequal(
292 struct pp_smumgr *smumgr,
293 uint32_t indirect_port,
298 if (smumgr == NULL || smumgr->device == NULL)
300 cgs_write_register(smumgr->device, indirect_port, index);
301 smum_wait_for_register_unequal(smumgr, indirect_port + 1,
/*
 * Allocate GPU memory through the CGS layer: alloc, then GPU-map
 * (filling *mc_addr), then CPU-map (filling *kptr). The handle is
 * returned through @handle. Lines 335/338 look like the error-unwind
 * path (gunmap + free on a later failure) — the labels/gotos and
 * return statements are elided from this listing; confirm against the
 * full file.
 */
305 int smu_allocate_memory(void *device, uint32_t size,
306 enum cgs_gpu_mem_type type,
307 uint32_t byte_align, uint64_t *mc_addr,
308 void **kptr, void *handle)
311 cgs_handle_t cgs_handle;
/* Reject NULL out-parameters up front. */
313 if (device == NULL || handle == NULL ||
314 mc_addr == NULL || kptr == NULL)
317 ret = cgs_alloc_gpu_mem(device, type, size, byte_align,
318 0, 0, (cgs_handle_t *)handle);
322 cgs_handle = *(cgs_handle_t *)handle;
324 ret = cgs_gmap_gpu_mem(device, cgs_handle, mc_addr);
328 ret = cgs_kmap_gpu_mem(device, cgs_handle, kptr);
335 cgs_gunmap_gpu_mem(device, cgs_handle);
338 cgs_free_gpu_mem(device, cgs_handle);
/*
 * Release memory allocated by smu_allocate_memory(): CPU-unmap,
 * GPU-unmap, then free, in that order. @handle is the cgs_handle_t
 * cast to void *.
 */
342 int smu_free_memory(void *device, void *handle)
344 cgs_handle_t cgs_handle = (cgs_handle_t)handle;
346 if (device == NULL || handle == NULL)
349 cgs_kunmap_gpu_mem(device, cgs_handle);
350 cgs_gunmap_gpu_mem(device, cgs_handle);
351 cgs_free_gpu_mem(device, cgs_handle);
/* Initialize the SMC table via the backend hook when implemented. */
356 int smum_init_smc_table(struct pp_hwmgr *hwmgr)
358 if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table)
359 return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr);
/* Populate all graphics (SCLK) DPM levels via the backend hook when implemented. */
364 int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
366 if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels)
367 return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
/* Populate all memory (MCLK) DPM levels via the backend hook when implemented. */
372 int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
374 if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels)
375 return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
380 /* This interface is needed by the island (CI) / VI ASICs. */
/* Initialize the memory-controller register table via the backend hook when implemented. */
381 int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
383 if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table)
384 return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
/* Query whether DPM is currently running via the backend hook when implemented. */
389 bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
391 if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running)
392 return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr);
397 int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
398 struct amd_pp_profile *request)
400 if (hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels)
401 return hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels(