drivers/gpu/drm/amd/amdgpu/ci_dpm.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_ucode.h"
#include "cikd.h"
#include "amdgpu_dpm.h"
#include "ci_dpm.h"
#include "gfx_v7_0.h"
#include "atom.h"
#include "amd_pcie.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

MODULE_FIRMWARE("radeon/bonaire_smc.bin");
MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

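/* Per-ASIC PowerTune defaults.  The scalar fields cover the SVI load line
 * setup, TDC throttle limits, TdcWaterfallCtl and the DTE/BAPM temperature
 * parameters; the two arrays are the BAPMTI_R/BAPMTI_RC thermal-interface
 * tables that ci_populate_bapm_parameters_in_dpm_table() hands to the SMC. */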
static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

#if 0
static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
#endif

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

#if 0
static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
#endif

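/* DIDT (di/dt) configuration: { offset, mask, shift, value, type } tuples
 * programmed into the SQ (0x00-0x12), DB (0x20-0x32), TD (0x40-0x52) and
 * TCP (0x60-0x72) indirect register blocks by
 * ci_program_pt_config_registers(); the list ends at the 0xFFFFFFFF offset. */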
static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

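/* The VBIOS leaves the detected memory module index in the upper half of
 * BIOS_SCRATCH_4; the driver reads it back when looking up per-module
 * memory settings. */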
static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
{
	return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
}

static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
				       u32 arb_freq_src, u32 arb_freq_dest)
{
	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;
	u32 burst_time;
	u32 mc_cg_config;

	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
			 MC_ARB_BURST_TIME__STATE0__SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
			 MC_ARB_BURST_TIME__STATE1__SHIFT;
		break;
	default:
		return -EINVAL;
	}

	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
			~MC_ARB_BURST_TIME__STATE0_MASK);
		break;
	case MC_CG_ARB_FREQ_F1:
		WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
			~MC_ARB_BURST_TIME__STATE1_MASK);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
	WREG32(mmMC_CG_CONFIG, mc_cg_config);
	WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
		~MC_ARB_CG__CG_ARB_REQ_MASK);

	return 0;
}

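/* DPM clocks are in 10 kHz units, so e.g. 10000 below is 100 MHz.  These
 * helpers map an mclk to the discrete MC parameter index (0x0-0xf) used by
 * the memory controller timing tables. */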
static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
	u8 mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
	return mc_para_index;
}

static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
	u8 mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 60000) / 5000);
	}
	return mc_para_index;
}

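/* If the atom voltage table has more entries than the SMC state table can
 * hold, keep only the highest max_voltage_steps entries and drop the lowest
 * ones. */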
static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table)
{
	unsigned int i, diff;

	if (voltage_table->count <= max_voltage_steps)
		return;

	diff = voltage_table->count - max_voltage_steps;

	for (i = 0; i < max_voltage_steps; i++)
		voltage_table->entries[i] = voltage_table->entries[i + diff];

	voltage_table->count = max_voltage_steps;
}

static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);

static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
							     PPSMC_Msg msg, u32 parameter);
static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);

static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

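/* Pick the PowerTune defaults from the PCI device ID: Bonaire parts use
 * defaults_bonaire_xt (also the fallback), Saturn parts defaults_saturn_xt,
 * and Hawaii XT/PRO their respective tables.  Power containment and CAC are
 * enabled; SQ/DB/TD/TCP di/dt ramping stays off by default. */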
static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	switch (adev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		if (adev->asic_type == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

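/* SVI2 VID encoding: voltage = 1.55 V - VID * 6.25 mV.  With vddc in mV and
 * VOLTAGE_SCALE = 4, that gives VID = (6200 - vddc * 4) / 25. */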
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;

	/* the fetched value is not used; the read only verifies that the
	 * PmFuses table is reachable, then the per-ASIC default is
	 * programmed unconditionally */
	pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
	    (adev->pm.dpm.fan.fan_output_sensitivity == 0))
		adev->pm.dpm.fan.fan_output_sensitivity =
			adev->pm.dpm.fan.default_fan_output_sensitivity;

	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
		cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);

	return 0;
}

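/* Scan the eight BapmVddC hi/lo SIDD VIDs for the smallest and largest
 * non-zero entries; these bound the GNB low-power-mode VID range handed to
 * the SMC. */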
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 hi_sidd, lo_sidd;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

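/* Locate the PmFuses table in SMC SRAM via the firmware header, fill in the
 * host-side copy (VIDs, load line, TDC limit, fuzzy fan, leakage SIDD),
 * then upload the whole structure in one copy. */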
static int ci_populate_pm_base(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = amdgpu_ci_read_smc_sram_dword(adev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(adev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(adev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(adev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(adev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(adev);
		if (ret)
			return ret;
		ret = ci_populate_fuzzy_fan(adev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
		if (ret)
			return ret;
		ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

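/* Toggle the DIDT_CTRL_EN bit in each di/dt block (SQ/DB/TD/TCP) whose
 * ramping cap is set; the caller is expected to hold RLC safe mode around
 * this (see ci_enable_didt()). */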
static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}

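/* Walk a ci_pt_config_reg table: CACHE entries accumulate field values that
 * are OR'd into the next non-cache entry's read-modify-write, which goes to
 * the SMC, DIDT, or plain MMIO register space depending on the entry type. */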
static int ci_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		adev->gfx.rlc.funcs->enter_safe_mode(adev);

		if (enable) {
			ret = ci_program_pt_config_registers(adev, didt_config_ci);
			if (ret) {
				adev->gfx.rlc.funcs->exit_safe_mode(adev);
				return ret;
			}
		}

		ci_do_enable_didt(adev, enable);

		adev->gfx.rlc.funcs->exit_safe_mode(adev);
	}

	return 0;
}

static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct amdgpu_cac_tdp_table *cac_tdp_table =
						adev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(adev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled) {
		if (enable)
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
		else
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
	}

	if (smc_result == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}

static int ci_power_control_set_level(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
	}

	return ret;
}

static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->uvd_power_gated = gate;

	if (gate) {
		/* stop the UVD block */
		amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							AMD_PG_STATE_GATE);
		ci_update_uvd_dpm(adev, gate);
	} else {
		amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							AMD_PG_STATE_UNGATE);
		ci_update_uvd_dpm(adev, gate);
	}
}

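/* An mclk switch has to complete within the vblank interval: GDDR5 needs
 * roughly 450 us, other vram types about 300 us, so a shorter vblank forces
 * mclk switching off. */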
static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
{
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;

	return vblank_time < switch_limit;
}

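/* Clamp the requested power state to the current operating limits: cap
 * clocks on DC power, honor the display's minimum clocks and the VCE clock
 * floors, and pin mclk to the highest level when mclk switching is unsafe
 * (multiple active CRTCs or a too-short vblank). */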
static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(adev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (adev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
		sclk = adev->pm.pm_display_cfg.min_core_set_clock;

	if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
		mclk = adev->pm.pm_display_cfg.min_mem_set_clock;

	if (rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(ixCG_THERMAL_INT);
	tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
	tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
		((low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT);
	WREG32_SMC(ixCG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

	adev->pm.dpm.thermal.min_temp = low_temp;
	adev->pm.dpm.thermal.max_temp = high_temp;
	return 0;
}

static int ci_thermal_enable_alert(struct amdgpu_device *adev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
	PPSMC_Result result;

	if (enable) {
		thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
				 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
			CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
			>> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
			>> CG_FDO_CTRL2__TMIN__SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
	tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

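/* Build the SMU7 fan table from the pptable fan profile: temperatures are
 * in units of 0.01 C (hence the +50/100 rounding), PWM points are scaled to
 * the FMAX_DUTY100 range, and the finished table is uploaded to SMC SRAM at
 * fan_table_start. */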
static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = amdgpu_asic_get_xclk(adev);

	fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
		>> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = amdgpu_ci_copy_bytes_to_smc(adev,
					  pi->fan_table_start,
					  (u8 *)(&fan_table),
					  sizeof(fan_table),
					  pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.\n");
		adev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result ret;

	if (pi->caps_od_fuzzy_fan_control_support) {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_FUZZY);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_MSG_SetFanPwmMax,
							       adev->pm.dpm.fan.default_max_fan_pwm);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	} else {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_TABLE);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->fan_is_controlled_by_smc = true;
	return 0;
}

static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
	PPSMC_Result ret;
	struct ci_power_info *pi = ci_get_pi(adev);

	ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
	if (ret == PPSMC_Result_OK) {
		pi->fan_is_controlled_by_smc = false;
		return 0;
	} else {
		return -EINVAL;
	}
}

static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
					u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;

	if (adev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
	duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
		>> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}

static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
					u32 speed)
{
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
	tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL0, tmp);

	return 0;
}

static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
{
	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(adev);
		ci_dpm_set_fan_speed_percent(adev, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(adev);
		break;
	case AMD_FAN_CTRL_AUTO:
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_thermal_start_smc_fan_control(adev);
		break;
	default:
		break;
	}
}

static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (pi->fan_is_controlled_by_smc)
		return AMD_FAN_CTRL_AUTO;
	else
		return AMD_FAN_CTRL_MANUAL;
}

#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
		>> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < adev->pm.fan_min_rpm) ||
	    (speed > adev->pm.fan_max_rpm))
		return -EINVAL;

	if (adev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(adev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
	tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
1344                 WREG32_SMC(ixCG_TACH_CTRL, tmp);
1345
1346         ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
1347
1348         return 0;
1349 }
1350 #endif
1351
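/*
 * Undo any manual fan-mode changes: restore the FDO PWM mode and TMIN
 * saved earlier in pi->fan_ctrl_default_mode and pi->t_min (the
 * pre-driver defaults).
 */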
1352 static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
1353 {
1354         struct ci_power_info *pi = ci_get_pi(adev);
1355         u32 tmp;
1356
1357         if (!pi->fan_ctrl_is_in_default_mode) {
1358                 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1359                 tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1360                 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1361
1362                 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1363                 tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
1364                 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1365                 pi->fan_ctrl_is_in_default_mode = true;
1366         }
1367 }
1368
1369 static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
1370 {
1371         if (adev->pm.dpm.fan.ucode_fan_control) {
1372                 ci_fan_ctrl_start_smc_fan_control(adev);
1373                 ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
1374         }
1375 }
1376
1377 static void ci_thermal_initialize(struct amdgpu_device *adev)
1378 {
1379         u32 tmp;
1380
1381         if (adev->pm.fan_pulses_per_revolution) {
1382                 tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
1383                 tmp |= (adev->pm.fan_pulses_per_revolution - 1)
1384                         << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
1385                 WREG32_SMC(ixCG_TACH_CTRL, tmp);
1386         }
1387
1388         tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
1389         tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
1390         WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1391 }
1392
1393 static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
1394 {
1395         int ret;
1396
1397         ci_thermal_initialize(adev);
1398         ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
1399         if (ret)
1400                 return ret;
1401         ret = ci_thermal_enable_alert(adev, true);
1402         if (ret)
1403                 return ret;
1404         if (adev->pm.dpm.fan.ucode_fan_control) {
1405                 ret = ci_thermal_setup_fan_table(adev);
1406                 if (ret)
1407                         return ret;
1408                 ci_thermal_start_smc_fan_control(adev);
1409         }
1410
1411         return 0;
1412 }
1413
1414 static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
1415 {
1416         if (!adev->pm.no_fan)
1417                 ci_fan_ctrl_set_default_mode(adev);
1418 }
1419
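/*
 * Accessors for the SMC "soft registers", a mailbox block in SMC SRAM.
 * pi->soft_regs_start comes from the firmware header; individual words
 * are addressed by their offset within SMU7_SoftRegisters.
 */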
1420 static int ci_read_smc_soft_register(struct amdgpu_device *adev,
1421                                      u16 reg_offset, u32 *value)
1422 {
1423         struct ci_power_info *pi = ci_get_pi(adev);
1424
1425         return amdgpu_ci_read_smc_sram_dword(adev,
1426                                       pi->soft_regs_start + reg_offset,
1427                                       value, pi->sram_end);
1428 }
1429
1430 static int ci_write_smc_soft_register(struct amdgpu_device *adev,
1431                                       u16 reg_offset, u32 value)
1432 {
1433         struct ci_power_info *pi = ci_get_pi(adev);
1434
1435         return amdgpu_ci_write_smc_sram_dword(adev,
1436                                        pi->soft_regs_start + reg_offset,
1437                                        value, pi->sram_end);
1438 }
1439
1440 static void ci_init_fps_limits(struct amdgpu_device *adev)
1441 {
1442         struct ci_power_info *pi = ci_get_pi(adev);
1443         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1444
1445         if (pi->caps_fps) {
1446                 u16 tmp;
1447
1448                 tmp = 45;
1449                 table->FpsHighT = cpu_to_be16(tmp);
1450
1451                 tmp = 30;
1452                 table->FpsLowT = cpu_to_be16(tmp);
1453         }
1454 }
1455
1456 static int ci_update_sclk_t(struct amdgpu_device *adev)
1457 {
1458         struct ci_power_info *pi = ci_get_pi(adev);
1459         int ret = 0;
1460         u32 low_sclk_interrupt_t = 0;
1461
1462         if (pi->caps_sclk_throttle_low_notification) {
1463                 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1464
1465                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
1466                                            pi->dpm_table_start +
1467                                            offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1468                                            (u8 *)&low_sclk_interrupt_t,
1469                                            sizeof(u32), pi->sram_end);
1470
1471         }
1472
1473         return ret;
1474 }
1475
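/*
 * Build the VDDC/VDDCI leakage translation tables.  VBIOS dependency
 * tables may carry virtual voltage IDs (ATOM_VIRTUAL_VOLTAGE_ID0 + n)
 * instead of real voltages; resolve each ID through EVV when the
 * platform supports it, otherwise through the VBIOS leakage table, and
 * record the actual voltage so later lookups can substitute it.
 */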
1476 static void ci_get_leakage_voltages(struct amdgpu_device *adev)
1477 {
1478         struct ci_power_info *pi = ci_get_pi(adev);
1479         u16 leakage_id, virtual_voltage_id;
1480         u16 vddc, vddci;
1481         int i;
1482
1483         pi->vddc_leakage.count = 0;
1484         pi->vddci_leakage.count = 0;
1485
1486         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
1487                 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1488                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1489                         if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
1490                                 continue;
1491                         if (vddc != 0 && vddc != virtual_voltage_id) {
1492                                 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1493                                 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1494                                 pi->vddc_leakage.count++;
1495                         }
1496                 }
1497         } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
1498                 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1499                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1500                         if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
1501                                                                                      virtual_voltage_id,
1502                                                                                      leakage_id) == 0) {
1503                                 if (vddc != 0 && vddc != virtual_voltage_id) {
1504                                         pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1505                                         pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1506                                         pi->vddc_leakage.count++;
1507                                 }
1508                                 if (vddci != 0 && vddci != virtual_voltage_id) {
1509                                         pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
1510                                         pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
1511                                         pi->vddci_leakage.count++;
1512                                 }
1513                         }
1514                 }
1515         }
1516 }
1517
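/*
 * Map the active auto-throttle source mask onto the hardware: thermal
 * protection is enabled in GENERAL_PWRMGT whenever any source is set
 * (and pi->thermal_protection allows it); routing the event source via
 * CG_THERMAL_CTRL is still stubbed out below.
 */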
1518 static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
1519 {
1520         struct ci_power_info *pi = ci_get_pi(adev);
1521         bool want_thermal_protection;
1522         enum amdgpu_dpm_event_src dpm_event_src;
1523         u32 tmp;
1524
1525         switch (sources) {
1526         case 0:
1527         default:
1528                 want_thermal_protection = false;
1529                 break;
1530         case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
1531                 want_thermal_protection = true;
1532                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
1533                 break;
1534         case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1535                 want_thermal_protection = true;
1536                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
1537                 break;
1538         case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1539               (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1540                 want_thermal_protection = true;
1541                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1542                 break;
1543         }
1544
1545         if (want_thermal_protection) {
1546 #if 0
1547                 /* XXX: need to figure out how to handle this properly */
1548                 tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1549                 tmp &= DPM_EVENT_SRC_MASK;
1550                 tmp |= DPM_EVENT_SRC(dpm_event_src);
1551                 WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1552 #endif
1553
1554                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1555                 if (pi->thermal_protection)
1556                         tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1557                 else
1558                         tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1559                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1560         } else {
1561                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1562                 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1563                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1564         }
1565 }
1566
1567 static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
1568                                            enum amdgpu_dpm_auto_throttle_src source,
1569                                            bool enable)
1570 {
1571         struct ci_power_info *pi = ci_get_pi(adev);
1572
1573         if (enable) {
1574                 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1575                         pi->active_auto_throttle_sources |= 1 << source;
1576                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1577                 }
1578         } else {
1579                 if (pi->active_auto_throttle_sources & (1 << source)) {
1580                         pi->active_auto_throttle_sources &= ~(1 << source);
1581                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1582                 }
1583         }
1584 }
1585
1586 static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
1587 {
1588         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1589                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1590 }
1591
1592 static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1593 {
1594         struct ci_power_info *pi = ci_get_pi(adev);
1595         PPSMC_Result smc_result;
1596
1597         if (!pi->need_update_smu7_dpm_table)
1598                 return 0;
1599
1600         if ((!pi->sclk_dpm_key_disabled) &&
1601             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1602                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1603                 if (smc_result != PPSMC_Result_OK)
1604                         return -EINVAL;
1605         }
1606
1607         if ((!pi->mclk_dpm_key_disabled) &&
1608             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1609                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1610                 if (smc_result != PPSMC_Result_OK)
1611                         return -EINVAL;
1612         }
1613
1614         pi->need_update_smu7_dpm_table = 0;
1615         return 0;
1616 }
1617
1618 static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
1619 {
1620         struct ci_power_info *pi = ci_get_pi(adev);
1621         PPSMC_Result smc_result;
1622
1623         if (enable) {
1624                 if (!pi->sclk_dpm_key_disabled) {
1625                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
1626                         if (smc_result != PPSMC_Result_OK)
1627                                 return -EINVAL;
1628                 }
1629
1630                 if (!pi->mclk_dpm_key_disabled) {
1631                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
1632                         if (smc_result != PPSMC_Result_OK)
1633                                 return -EINVAL;
1634
1635                         WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
1636                                         ~MC_SEQ_CNTL_3__CAC_EN_MASK);
1637
1638                         WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
1639                         WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
1640                         WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
1641
1642                         udelay(10);
1643
1644                         WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
1645                         WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
1646                         WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
1647                 }
1648         } else {
1649                 if (!pi->sclk_dpm_key_disabled) {
1650                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
1651                         if (smc_result != PPSMC_Result_OK)
1652                                 return -EINVAL;
1653                 }
1654
1655                 if (!pi->mclk_dpm_key_disabled) {
1656                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
1657                         if (smc_result != PPSMC_Result_OK)
1658                                 return -EINVAL;
1659                 }
1660         }
1661
1662         return 0;
1663 }
1664
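/*
 * Bring DPM up: enable global/dynamic power management, give the SMC a
 * voltage-change timeout, enable voltage control, then SCLK/MCLK DPM
 * and, unless its key is disabled, PCIe DPM.  Any SMC message failure
 * aborts with -EINVAL.
 */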
1665 static int ci_start_dpm(struct amdgpu_device *adev)
1666 {
1667         struct ci_power_info *pi = ci_get_pi(adev);
1668         PPSMC_Result smc_result;
1669         int ret;
1670         u32 tmp;
1671
1672         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1673         tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1674         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1675
1676         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1677         tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1678         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1679
1680         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1681
1682         WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
1683
1684         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
1685         if (smc_result != PPSMC_Result_OK)
1686                 return -EINVAL;
1687
1688         ret = ci_enable_sclk_mclk_dpm(adev, true);
1689         if (ret)
1690                 return ret;
1691
1692         if (!pi->pcie_dpm_key_disabled) {
1693                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
1694                 if (smc_result != PPSMC_Result_OK)
1695                         return -EINVAL;
1696         }
1697
1698         return 0;
1699 }
1700
1701 static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1702 {
1703         struct ci_power_info *pi = ci_get_pi(adev);
1704         PPSMC_Result smc_result;
1705
1706         if (!pi->need_update_smu7_dpm_table)
1707                 return 0;
1708
1709         if ((!pi->sclk_dpm_key_disabled) &&
1710             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1711                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1712                 if (smc_result != PPSMC_Result_OK)
1713                         return -EINVAL;
1714         }
1715
1716         if ((!pi->mclk_dpm_key_disabled) &&
1717             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1718                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1719                 if (smc_result != PPSMC_Result_OK)
1720                         return -EINVAL;
1721         }
1722
1723         return 0;
1724 }
1725
1726 static int ci_stop_dpm(struct amdgpu_device *adev)
1727 {
1728         struct ci_power_info *pi = ci_get_pi(adev);
1729         PPSMC_Result smc_result;
1730         int ret;
1731         u32 tmp;
1732
1733         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1734         tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1735         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1736
1737         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1738         tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1739         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1740
1741         if (!pi->pcie_dpm_key_disabled) {
1742                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
1743                 if (smc_result != PPSMC_Result_OK)
1744                         return -EINVAL;
1745         }
1746
1747         ret = ci_enable_sclk_mclk_dpm(adev, false);
1748         if (ret)
1749                 return ret;
1750
1751         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
1752         if (smc_result != PPSMC_Result_OK)
1753                 return -EINVAL;
1754
1755         return 0;
1756 }
1757
1758 static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1759 {
1760         u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1761
1762         if (enable)
1763                 tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1764         else
1765                 tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1766         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1767 }
1768
1769 #if 0
1770 static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
1771                                         bool ac_power)
1772 {
1773         struct ci_power_info *pi = ci_get_pi(adev);
1774         struct amdgpu_cac_tdp_table *cac_tdp_table =
1775                 adev->pm.dpm.dyn_state.cac_tdp_table;
1776         u32 power_limit;
1777
1778         if (ac_power)
1779                 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1780         else
1781                 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1782
1783         ci_set_power_limit(adev, power_limit);
1784
1785         if (pi->caps_automatic_dc_transition) {
1786                 if (ac_power)
1787                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
1788                 else
1789                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
1790         }
1791
1792         return 0;
1793 }
1794 #endif
1795
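/*
 * Parameterized SMC messages pass their payload through SMC_MSG_ARG_0:
 * the argument is written before the message is sent, or read back
 * once the SMC acknowledges with PPSMC_Result_OK.
 */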
1796 static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
1797                                                       PPSMC_Msg msg, u32 parameter)
1798 {
1799         WREG32(mmSMC_MSG_ARG_0, parameter);
1800         return amdgpu_ci_send_msg_to_smc(adev, msg);
1801 }
1802
1803 static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1804                                                         PPSMC_Msg msg, u32 *parameter)
1805 {
1806         PPSMC_Result smc_result;
1807
1808         smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1809
1810         if ((smc_result == PPSMC_Result_OK) && parameter)
1811                 *parameter = RREG32(mmSMC_MSG_ARG_0);
1812
1813         return smc_result;
1814 }
1815
1816 static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1817 {
1818         struct ci_power_info *pi = ci_get_pi(adev);
1819
1820         if (!pi->sclk_dpm_key_disabled) {
1821                 PPSMC_Result smc_result =
1822                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1823                 if (smc_result != PPSMC_Result_OK)
1824                         return -EINVAL;
1825         }
1826
1827         return 0;
1828 }
1829
1830 static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1831 {
1832         struct ci_power_info *pi = ci_get_pi(adev);
1833
1834         if (!pi->mclk_dpm_key_disabled) {
1835                 PPSMC_Result smc_result =
1836                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1837                 if (smc_result != PPSMC_Result_OK)
1838                         return -EINVAL;
1839         }
1840
1841         return 0;
1842 }
1843
1844 static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1845 {
1846         struct ci_power_info *pi = ci_get_pi(adev);
1847
1848         if (!pi->pcie_dpm_key_disabled) {
1849                 PPSMC_Result smc_result =
1850                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1851                 if (smc_result != PPSMC_Result_OK)
1852                         return -EINVAL;
1853         }
1854
1855         return 0;
1856 }
1857
1858 static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1859 {
1860         struct ci_power_info *pi = ci_get_pi(adev);
1861
1862         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1863                 PPSMC_Result smc_result =
1864                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1865                 if (smc_result != PPSMC_Result_OK)
1866                         return -EINVAL;
1867         }
1868
1869         return 0;
1870 }
1871
1872 static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1873                                        u32 target_tdp)
1874 {
1875         PPSMC_Result smc_result =
1876                 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1877         if (smc_result != PPSMC_Result_OK)
1878                 return -EINVAL;
1879         return 0;
1880 }
1881
1882 #if 0
1883 static int ci_set_boot_state(struct amdgpu_device *adev)
1884 {
1885         return ci_enable_sclk_mclk_dpm(adev, false);
1886 }
1887 #endif
1888
1889 static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1890 {
1891         u32 sclk_freq;
1892         PPSMC_Result smc_result =
1893                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1894                                                     PPSMC_MSG_API_GetSclkFrequency,
1895                                                     &sclk_freq);
1896         if (smc_result != PPSMC_Result_OK)
1897                 sclk_freq = 0;
1898
1899         return sclk_freq;
1900 }
1901
1902 static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1903 {
1904         u32 mclk_freq;
1905         PPSMC_Result smc_result =
1906                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1907                                                     PPSMC_MSG_API_GetMclkFrequency,
1908                                                     &mclk_freq);
1909         if (smc_result != PPSMC_Result_OK)
1910                 mclk_freq = 0;
1911
1912         return mclk_freq;
1913 }
1914
1915 static void ci_dpm_start_smc(struct amdgpu_device *adev)
1916 {
1917         int i;
1918
1919         amdgpu_ci_program_jump_on_start(adev);
1920         amdgpu_ci_start_smc_clock(adev);
1921         amdgpu_ci_start_smc(adev);
1922         for (i = 0; i < adev->usec_timeout; i++) {
1923                 if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
1924                         break;
1925         }
1926 }
1927
1928 static void ci_dpm_stop_smc(struct amdgpu_device *adev)
1929 {
1930         amdgpu_ci_reset_smc(adev);
1931         amdgpu_ci_stop_smc_clock(adev);
1932 }
1933
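/*
 * Read the SMU7 firmware header out of SMC SRAM to learn where the
 * DPM, soft-register, MC register, fan and MC arb tables live, so
 * later table uploads know their destination addresses.
 */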
1934 static int ci_process_firmware_header(struct amdgpu_device *adev)
1935 {
1936         struct ci_power_info *pi = ci_get_pi(adev);
1937         u32 tmp;
1938         int ret;
1939
1940         ret = amdgpu_ci_read_smc_sram_dword(adev,
1941                                      SMU7_FIRMWARE_HEADER_LOCATION +
1942                                      offsetof(SMU7_Firmware_Header, DpmTable),
1943                                      &tmp, pi->sram_end);
1944         if (ret)
1945                 return ret;
1946
1947         pi->dpm_table_start = tmp;
1948
1949         ret = amdgpu_ci_read_smc_sram_dword(adev,
1950                                      SMU7_FIRMWARE_HEADER_LOCATION +
1951                                      offsetof(SMU7_Firmware_Header, SoftRegisters),
1952                                      &tmp, pi->sram_end);
1953         if (ret)
1954                 return ret;
1955
1956         pi->soft_regs_start = tmp;
1957
1958         ret = amdgpu_ci_read_smc_sram_dword(adev,
1959                                      SMU7_FIRMWARE_HEADER_LOCATION +
1960                                      offsetof(SMU7_Firmware_Header, mcRegisterTable),
1961                                      &tmp, pi->sram_end);
1962         if (ret)
1963                 return ret;
1964
1965         pi->mc_reg_table_start = tmp;
1966
1967         ret = amdgpu_ci_read_smc_sram_dword(adev,
1968                                      SMU7_FIRMWARE_HEADER_LOCATION +
1969                                      offsetof(SMU7_Firmware_Header, FanTable),
1970                                      &tmp, pi->sram_end);
1971         if (ret)
1972                 return ret;
1973
1974         pi->fan_table_start = tmp;
1975
1976         ret = amdgpu_ci_read_smc_sram_dword(adev,
1977                                      SMU7_FIRMWARE_HEADER_LOCATION +
1978                                      offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1979                                      &tmp, pi->sram_end);
1980         if (ret)
1981                 return ret;
1982
1983         pi->arb_table_start = tmp;
1984
1985         return 0;
1986 }
1987
1988 static void ci_read_clock_registers(struct amdgpu_device *adev)
1989 {
1990         struct ci_power_info *pi = ci_get_pi(adev);
1991
1992         pi->clock_registers.cg_spll_func_cntl =
1993                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
1994         pi->clock_registers.cg_spll_func_cntl_2 =
1995                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
1996         pi->clock_registers.cg_spll_func_cntl_3 =
1997                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
1998         pi->clock_registers.cg_spll_func_cntl_4 =
1999                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
2000         pi->clock_registers.cg_spll_spread_spectrum =
2001                 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2002         pi->clock_registers.cg_spll_spread_spectrum_2 =
2003                 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
2004         pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
2005         pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
2006         pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
2007         pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
2008         pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
2009         pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
2010         pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
2011         pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
2012         pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
2013 }
2014
2015 static void ci_init_sclk_t(struct amdgpu_device *adev)
2016 {
2017         struct ci_power_info *pi = ci_get_pi(adev);
2018
2019         pi->low_sclk_interrupt_t = 0;
2020 }
2021
2022 static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2023                                          bool enable)
2024 {
2025         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2026
2027         if (enable)
2028                 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2029         else
2030                 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2031         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2032 }
2033
2034 static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2035 {
2036         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2037
2038         tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2039
2040         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2041 }
2042
2043 #if 0
2044 static int ci_enter_ulp_state(struct amdgpu_device *adev)
2045 {
2047         WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
2048
2049         udelay(25000);
2050
2051         return 0;
2052 }
2053
2054 static int ci_exit_ulp_state(struct amdgpu_device *adev)
2055 {
2056         int i;
2057
2058         WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
2059
2060         udelay(7000);
2061
2062         for (i = 0; i < adev->usec_timeout; i++) {
2063                 if (RREG32(mmSMC_RESP_0) == 1)
2064                         break;
2065                 udelay(1000);
2066         }
2067
2068         return 0;
2069 }
2070 #endif
2071
2072 static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2073                                         bool has_display)
2074 {
2075         PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2076
2077         return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
2078 }
2079
2080 static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2081                                       bool enable)
2082 {
2083         struct ci_power_info *pi = ci_get_pi(adev);
2084
2085         if (enable) {
2086                 if (pi->caps_sclk_ds) {
2087                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2088                                 return -EINVAL;
2089                 } else {
2090                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2091                                 return -EINVAL;
2092                 }
2093         } else {
2094                 if (pi->caps_sclk_ds) {
2095                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2096                                 return -EINVAL;
2097                 }
2098         }
2099
2100         return 0;
2101 }
2102
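/*
 * Program the display gap: use VBLANK_OR_WM while any CRTC is active,
 * and convert the refresh rate and vblank time into a pre-VBI count
 * for CG_DISPLAY_GAP_CNTL2 (ref_clock is in 10 kHz units, so
 * ref_clock / 100 is ticks per microsecond) plus a VBlankTimeout soft
 * register for the SMC.
 */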
2103 static void ci_program_display_gap(struct amdgpu_device *adev)
2104 {
2105         u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2106         u32 pre_vbi_time_in_us;
2107         u32 frame_time_in_us;
2108         u32 ref_clock = adev->clock.spll.reference_freq;
2109         u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
2110         u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
2111
2112         tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
2113         if (adev->pm.dpm.new_active_crtc_count > 0)
2114                 tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2115         else
2116                 tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2117         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2118
2119         if (refresh_rate == 0)
2120                 refresh_rate = 60;
2121         if (vblank_time == 0xffffffff)
2122                 vblank_time = 500;
2123         frame_time_in_us = 1000000 / refresh_rate;
2124         pre_vbi_time_in_us =
2125                 frame_time_in_us - 200 - vblank_time;
2126         tmp = pre_vbi_time_in_us * (ref_clock / 100);
2127
2128         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
2129         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2130         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
2131
2133         ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
2135 }
2136
2137 static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
2138 {
2139         struct ci_power_info *pi = ci_get_pi(adev);
2140         u32 tmp;
2141
2142         if (enable) {
2143                 if (pi->caps_sclk_ss_support) {
2144                         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2145                         tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2146                         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2147                 }
2148         } else {
2149                 tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2150                 tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
2151                 WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
2152
2153                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2154                 tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2155                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2156         }
2157 }
2158
2159 static void ci_program_sstp(struct amdgpu_device *adev)
2160 {
2161         WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2162                    ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2163                     (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
2164 }
2165
2166 static void ci_enable_display_gap(struct amdgpu_device *adev)
2167 {
2168         u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2169
2170         tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2171                         CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2172         tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2173                 (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2174
2175         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2176 }
2177
2178 static void ci_program_vc(struct amdgpu_device *adev)
2179 {
2180         u32 tmp;
2181
2182         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2183         tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2184         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2185
2186         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
2187         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
2188         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
2189         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
2190         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
2191         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
2192         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
2193         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
2194 }
2195
2196 static void ci_clear_vc(struct amdgpu_device *adev)
2197 {
2198         u32 tmp;
2199
2200         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2201         tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2202         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2203
2204         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
2205         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
2206         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
2207         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
2208         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
2209         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
2210         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
2211         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
2212 }
2213
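/*
 * Load the SMC firmware: skip when the SMC is already running,
 * otherwise wait for its boot sequence to complete, stop the SMC clock
 * and hold it in reset, then copy the ucode image up to SMC_RAM_END.
 */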
2214 static int ci_upload_firmware(struct amdgpu_device *adev)
2215 {
2216         int i, ret;
2217
2218         if (amdgpu_ci_is_smc_running(adev)) {
2219                 DRM_INFO("smc is running, no need to load smc firmware\n");
2220                 return 0;
2221         }
2222
2223         for (i = 0; i < adev->usec_timeout; i++) {
2224                 if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
2225                         break;
2226         }
2227         WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
2228
2229         amdgpu_ci_stop_smc_clock(adev);
2230         amdgpu_ci_reset_smc(adev);
2231
2232         ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);
2233
2234         return ret;
2236 }
2237
2238 static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2239                                      struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2240                                      struct atom_voltage_table *voltage_table)
2241 {
2242         u32 i;
2243
2244         if (voltage_dependency_table == NULL)
2245                 return -EINVAL;
2246
2247         voltage_table->mask_low = 0;
2248         voltage_table->phase_delay = 0;
2249
2250         voltage_table->count = voltage_dependency_table->count;
2251         for (i = 0; i < voltage_table->count; i++) {
2252                 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2253                 voltage_table->entries[i].smio_low = 0;
2254         }
2255
2256         return 0;
2257 }
2258
2259 static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2260 {
2261         struct ci_power_info *pi = ci_get_pi(adev);
2262         int ret;
2263
2264         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2265                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2266                                                         VOLTAGE_OBJ_GPIO_LUT,
2267                                                         &pi->vddc_voltage_table);
2268                 if (ret)
2269                         return ret;
2270         } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2271                 ret = ci_get_svi2_voltage_table(adev,
2272                                                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2273                                                 &pi->vddc_voltage_table);
2274                 if (ret)
2275                         return ret;
2276         }
2277
2278         if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2279                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2280                                                          &pi->vddc_voltage_table);
2281
2282         if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2283                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2284                                                         VOLTAGE_OBJ_GPIO_LUT,
2285                                                         &pi->vddci_voltage_table);
2286                 if (ret)
2287                         return ret;
2288         } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2289                 ret = ci_get_svi2_voltage_table(adev,
2290                                                 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2291                                                 &pi->vddci_voltage_table);
2292                 if (ret)
2293                         return ret;
2294         }
2295
2296         if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2297                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2298                                                          &pi->vddci_voltage_table);
2299
2300         if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2301                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2302                                                         VOLTAGE_OBJ_GPIO_LUT,
2303                                                         &pi->mvdd_voltage_table);
2304                 if (ret)
2305                         return ret;
2306         } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2307                 ret = ci_get_svi2_voltage_table(adev,
2308                                                 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2309                                                 &pi->mvdd_voltage_table);
2310                 if (ret)
2311                         return ret;
2312         }
2313
2314         if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2315                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2316                                                          &pi->mvdd_voltage_table);
2317
2318         return 0;
2319 }
2320
2321 static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2322                                           struct atom_voltage_table_entry *voltage_table,
2323                                           SMU7_Discrete_VoltageLevel *smc_voltage_table)
2324 {
2325         int ret;
2326
2327         ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2328                                             &smc_voltage_table->StdVoltageHiSidd,
2329                                             &smc_voltage_table->StdVoltageLoSidd);
2330
2331         if (ret) {
2332                 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2333                 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2334         }
2335
2336         smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2337         smc_voltage_table->StdVoltageHiSidd =
2338                 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2339         smc_voltage_table->StdVoltageLoSidd =
2340                 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2341 }
2342
2343 static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2344                                       SMU7_Discrete_DpmTable *table)
2345 {
2346         struct ci_power_info *pi = ci_get_pi(adev);
2347         unsigned int count;
2348
2349         table->VddcLevelCount = pi->vddc_voltage_table.count;
2350         for (count = 0; count < table->VddcLevelCount; count++) {
2351                 ci_populate_smc_voltage_table(adev,
2352                                               &pi->vddc_voltage_table.entries[count],
2353                                               &table->VddcLevel[count]);
2354
2355                 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2356                         table->VddcLevel[count].Smio |=
2357                                 pi->vddc_voltage_table.entries[count].smio_low;
2358                 else
2359                         table->VddcLevel[count].Smio = 0;
2360         }
2361         table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2362
2363         return 0;
2364 }
2365
2366 static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2367                                        SMU7_Discrete_DpmTable *table)
2368 {
2369         unsigned int count;
2370         struct ci_power_info *pi = ci_get_pi(adev);
2371
2372         table->VddciLevelCount = pi->vddci_voltage_table.count;
2373         for (count = 0; count < table->VddciLevelCount; count++) {
2374                 ci_populate_smc_voltage_table(adev,
2375                                               &pi->vddci_voltage_table.entries[count],
2376                                               &table->VddciLevel[count]);
2377
2378                 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2379                         table->VddciLevel[count].Smio |=
2380                                 pi->vddci_voltage_table.entries[count].smio_low;
2381                 else
2382                         table->VddciLevel[count].Smio = 0;
2383         }
2384         table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2385
2386         return 0;
2387 }
2388
2389 static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2390                                       SMU7_Discrete_DpmTable *table)
2391 {
2392         struct ci_power_info *pi = ci_get_pi(adev);
2393         unsigned int count;
2394
2395         table->MvddLevelCount = pi->mvdd_voltage_table.count;
2396         for (count = 0; count < table->MvddLevelCount; count++) {
2397                 ci_populate_smc_voltage_table(adev,
2398                                               &pi->mvdd_voltage_table.entries[count],
2399                                               &table->MvddLevel[count]);
2400
2401                 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2402                         table->MvddLevel[count].Smio |=
2403                                 pi->mvdd_voltage_table.entries[count].smio_low;
2404                 else
2405                         table->MvddLevel[count].Smio = 0;
2406         }
2407         table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2408
2409         return 0;
2410 }
2411
2412 static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2413                                           SMU7_Discrete_DpmTable *table)
2414 {
2415         int ret;
2416
2417         ret = ci_populate_smc_vddc_table(adev, table);
2418         if (ret)
2419                 return ret;
2420
2421         ret = ci_populate_smc_vddci_table(adev, table);
2422         if (ret)
2423                 return ret;
2424
2425         ret = ci_populate_smc_mvdd_table(adev, table);
2426         if (ret)
2427                 return ret;
2428
2429         return 0;
2430 }
2431
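/*
 * Look up the MVDD level for a memory clock.  The dependency table is
 * assumed sorted by clock, so the first entry with clk >= mclk is
 * taken; returns -EINVAL when MVDD is not controlled or no entry
 * covers the requested clock.
 */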
2432 static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2433                                   SMU7_Discrete_VoltageLevel *voltage)
2434 {
2435         struct ci_power_info *pi = ci_get_pi(adev);
2436         u32 i = 0;
2437
2438         if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2439                 for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2440                         if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2441                                 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2442                                 return 0;
2443                         }
2444                 }
2445
2446                 if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2447                         return -EINVAL;
2448         }
2449
2450         return -EINVAL;
2451 }
2452
2453 static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2454                                          struct atom_voltage_table_entry *voltage_table,
2455                                          u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2456 {
2457         u16 v_index, idx;
2458         bool voltage_found = false;
2459         *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2460         *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2461
2462         if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2463                 return -EINVAL;
2464
2465         if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2466                 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2467                         if (voltage_table->value ==
2468                             adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2469                                 voltage_found = true;
2470                                 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2471                                         idx = v_index;
2472                                 else
2473                                         idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2474                                 *std_voltage_lo_sidd =
2475                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2476                                 *std_voltage_hi_sidd =
2477                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2478                                 break;
2479                         }
2480                 }
2481
2482                 if (!voltage_found) {
2483                         for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2484                                 if (voltage_table->value <=
2485                                     adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2486                                         voltage_found = true;
2487                                         if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2488                                                 idx = v_index;
2489                                         else
2490                                                 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2491                                         *std_voltage_lo_sidd =
2492                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2493                                         *std_voltage_hi_sidd =
2494                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2495                                         break;
2496                                 }
2497                         }
2498                 }
2499         }
2500
2501         return 0;
2502 }
2503
2504 static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2505                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2506                                                   u32 sclk,
2507                                                   u32 *phase_shedding)
2508 {
2509         unsigned int i;
2510
2511         *phase_shedding = 1;
2512
2513         for (i = 0; i < limits->count; i++) {
2514                 if (sclk < limits->entries[i].sclk) {
2515                         *phase_shedding = i;
2516                         break;
2517                 }
2518         }
2519 }
2520
2521 static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2522                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2523                                                   u32 mclk,
2524                                                   u32 *phase_shedding)
2525 {
2526         unsigned int i;
2527
2528         *phase_shedding = 1;
2529
2530         for (i = 0; i < limits->count; i++) {
2531                 if (mclk < limits->entries[i].mclk) {
2532                         *phase_shedding = i;
2533                         break;
2534                 }
2535         }
2536 }
2537
2538 static int ci_init_arb_table_index(struct amdgpu_device *adev)
2539 {
2540         struct ci_power_info *pi = ci_get_pi(adev);
2541         u32 tmp;
2542         int ret;
2543
2544         ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2545                                      &tmp, pi->sram_end);
2546         if (ret)
2547                 return ret;
2548
2549         tmp &= 0x00FFFFFF;
2550         tmp |= MC_CG_ARB_FREQ_F1 << 24;
2551
2552         return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2553                                        tmp, pi->sram_end);
2554 }
2555
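/*
 * Generic clock -> voltage lookup: return the voltage of the first
 * dependency entry whose clock reaches the request, falling back to
 * the highest entry when the requested clock is above the table.
 */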
2556 static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2557                                          struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2558                                          u32 clock, u32 *voltage)
2559 {
2560         u32 i = 0;
2561
2562         if (allowed_clock_voltage_table->count == 0)
2563                 return -EINVAL;
2564
2565         for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2566                 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2567                         *voltage = allowed_clock_voltage_table->entries[i].v;
2568                         return 0;
2569                 }
2570         }
2571
2572         *voltage = allowed_clock_voltage_table->entries[i-1].v;
2573
2574         return 0;
2575 }
2576
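/*
 * Deep-sleep divider: return the largest power-of-two shift that keeps
 * sclk at or above the self-refresh minimum, e.g. sclk = 8 * min
 * yields divider ID 3 (sclk >> 3 == min).
 */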
2577 static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
2578 {
2579         u32 i;
2580         u32 tmp;
2581         u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
2582
2583         if (sclk < min)
2584                 return 0;
2585
2586         for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
2587                 tmp = sclk >> i;
2588                 if (tmp >= min || i == 0)
2589                         break;
2590         }
2591
2592         return (u8)i;
2593 }
2594
2595 static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
2596 {
2597         return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2598 }
2599
2600 static int ci_reset_to_default(struct amdgpu_device *adev)
2601 {
2602         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2603                 0 : -EINVAL;
2604 }
2605
2606 static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2607 {
2608         u32 tmp;
2609
2610         tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2611
2612         if (tmp == MC_CG_ARB_FREQ_F0)
2613                 return 0;
2614
2615         return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2616 }
2617
2618 static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2619                                         const u32 engine_clock,
2620                                         const u32 memory_clock,
2621                                         u32 *dram_timing2)
2622 {
2623         bool patch;
2624         u32 tmp, tmp2;
2625
2626         tmp = RREG32(mmMC_SEQ_MISC0);
2627         patch = ((tmp & 0x0000f00) == 0x300);
2628
2629         if (patch &&
2630             ((adev->pdev->device == 0x67B0) ||
2631              (adev->pdev->device == 0x67B1))) {
2632                 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2633                         tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2634                         *dram_timing2 &= ~0x00ff0000;
2635                         *dram_timing2 |= tmp2 << 16;
2636                 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2637                         tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2638                         *dram_timing2 &= ~0x00ff0000;
2639                         *dram_timing2 |= tmp2 << 16;
2640                 }
2641         }
2642 }
2643
2644 static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2645                                                 u32 sclk,
2646                                                 u32 mclk,
2647                                                 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2648 {
2649         u32 dram_timing;
2650         u32 dram_timing2;
2651         u32 burst_time;
2652
2653         amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2654
2655         dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
2656         dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2657         burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2658
2659         ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2660
2661         arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2662         arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2663         arb_regs->McArbBurstTime = (u8)burst_time;
2664
2665         return 0;
2666 }
2667
2668 static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2669 {
2670         struct ci_power_info *pi = ci_get_pi(adev);
2671         SMU7_Discrete_MCArbDramTimingTable arb_regs;
2672         u32 i, j;
2673         int ret =  0;
2674
2675         memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2676
2677         for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2678                 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2679                         ret = ci_populate_memory_timing_parameters(adev,
2680                                                                    pi->dpm_table.sclk_table.dpm_levels[i].value,
2681                                                                    pi->dpm_table.mclk_table.dpm_levels[j].value,
2682                                                                    &arb_regs.entries[i][j]);
2683                         if (ret)
2684                                 return ret;
2685                 }
2686         }
2687
2688         if (ret == 0)
2689                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
2690                                            pi->arb_table_start,
2691                                            (u8 *)&arb_regs,
2692                                            sizeof(SMU7_Discrete_MCArbDramTimingTable),
2693                                            pi->sram_end);
2694
2695         return ret;
2696 }
2697
2698 static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
2699 {
2700         struct ci_power_info *pi = ci_get_pi(adev);
2701
2702         if (pi->need_update_smu7_dpm_table == 0)
2703                 return 0;
2704
2705         return ci_do_program_memory_timing_parameters(adev);
2706 }
2707
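/*
 * Select the SMC boot levels: the first sclk/mclk dependency entries
 * whose clocks are at least the boot state's level-0 clocks.
 */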
2708 static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
2709                                           struct amdgpu_ps *amdgpu_boot_state)
2710 {
2711         struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
2712         struct ci_power_info *pi = ci_get_pi(adev);
2713         u32 level = 0;
2714
2715         for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2716                 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2717                     boot_state->performance_levels[0].sclk) {
2718                         pi->smc_state_table.GraphicsBootLevel = level;
2719                         break;
2720                 }
2721         }
2722
2723         for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2724                 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2725                     boot_state->performance_levels[0].mclk) {
2726                         pi->smc_state_table.MemoryBootLevel = level;
2727                         break;
2728                 }
2729         }
2730 }
2731
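/*
 * Convert a DPM table to a level-enable bitmask, bit N = level N.
 * For example, a 3-level table with levels 0 and 2 enabled yields
 * 0b101 (0x5): the loop shifts MSB-first so level 0 lands in bit 0.
 */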
2732 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2733 {
2734         u32 i;
2735         u32 mask_value = 0;
2736
2737         for (i = dpm_table->count; i > 0; i--) {
2738                 mask_value <<= 1;
2739                 if (dpm_table->dpm_levels[i-1].enabled)
2740                         mask_value |= 0x1;
2743         }
2744
2745         return mask_value;
2746 }
2747
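/*
 * Copy the PCIe speed DPM table into the SMC link levels: gen speed
 * plus encoded lane count per level, with fixed DownT/UpT values of
 * 5 and 30.
 */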
2748 static void ci_populate_smc_link_level(struct amdgpu_device *adev,
2749                                        SMU7_Discrete_DpmTable *table)
2750 {
2751         struct ci_power_info *pi = ci_get_pi(adev);
2752         struct ci_dpm_table *dpm_table = &pi->dpm_table;
2753         u32 i;
2754
2755         for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2756                 table->LinkLevel[i].PcieGenSpeed =
2757                         (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2758                 table->LinkLevel[i].PcieLaneCount =
2759                         amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2760                 table->LinkLevel[i].EnabledForActivity = 1;
2761                 table->LinkLevel[i].DownT = cpu_to_be32(5);
2762                 table->LinkLevel[i].UpT = cpu_to_be32(30);
2763         }
2764
2765         pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2766         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2767                 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2768 }
2769
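/*
 * The next four helpers follow one pattern: copy a clock-voltage
 * dependency table (UVD vclk/dclk here; VCE, ACP and SAMU below) into
 * the SMC table, look up each frequency's post divider via the VBIOS,
 * then byte-swap the multi-byte fields for the big-endian SMC.
 */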
2770 static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
2771                                      SMU7_Discrete_DpmTable *table)
2772 {
2773         u32 count;
2774         struct atom_clock_dividers dividers;
2775         int ret = -EINVAL;
2776
2777         table->UvdLevelCount =
2778                 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2779
2780         for (count = 0; count < table->UvdLevelCount; count++) {
2781                 table->UvdLevel[count].VclkFrequency =
2782                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2783                 table->UvdLevel[count].DclkFrequency =
2784                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2785                 table->UvdLevel[count].MinVddc =
2786                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2787                 table->UvdLevel[count].MinVddcPhases = 1;
2788
2789                 ret = amdgpu_atombios_get_clock_dividers(adev,
2790                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2791                                                          table->UvdLevel[count].VclkFrequency, false, &dividers);
2792                 if (ret)
2793                         return ret;
2794
2795                 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2796
2797                 ret = amdgpu_atombios_get_clock_dividers(adev,
2798                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2799                                                          table->UvdLevel[count].DclkFrequency, false, &dividers);
2800                 if (ret)
2801                         return ret;
2802
2803                 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2804
2805                 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2806                 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2807                 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2808         }
2809
2810         return ret;
2811 }
2812
2813 static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
2814                                      SMU7_Discrete_DpmTable *table)
2815 {
2816         u32 count;
2817         struct atom_clock_dividers dividers;
2818         int ret = -EINVAL;
2819
2820         table->VceLevelCount =
2821                 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2822
2823         for (count = 0; count < table->VceLevelCount; count++) {
2824                 table->VceLevel[count].Frequency =
2825                         adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2826                 table->VceLevel[count].MinVoltage =
2827                         (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2828                 table->VceLevel[count].MinPhases = 1;
2829
2830                 ret = amdgpu_atombios_get_clock_dividers(adev,
2831                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2832                                                          table->VceLevel[count].Frequency, false, &dividers);
2833                 if (ret)
2834                         return ret;
2835
2836                 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2837
2838                 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2839                 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2840         }
2841
2842         return ret;
2844 }
2845
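/*
 * Note: unlike the UVD/VCE/SAMU variants, MinVoltage here is not
 * scaled by VOLTAGE_SCALE before the byte swap; it is not obvious
 * from this file whether that is intentional for ACP.
 */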
2846 static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
2847                                      SMU7_Discrete_DpmTable *table)
2848 {
2849         u32 count;
2850         struct atom_clock_dividers dividers;
2851         int ret = -EINVAL;
2852
2853         table->AcpLevelCount = (u8)
2854                 (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2855
2856         for (count = 0; count < table->AcpLevelCount; count++) {
2857                 table->AcpLevel[count].Frequency =
2858                         adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2859                 table->AcpLevel[count].MinVoltage =
2860                         adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2861                 table->AcpLevel[count].MinPhases = 1;
2862
2863                 ret = amdgpu_atombios_get_clock_dividers(adev,
2864                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2865                                                          table->AcpLevel[count].Frequency, false, &dividers);
2866                 if (ret)
2867                         return ret;
2868
2869                 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2870
2871                 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2872                 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2873         }
2874
2875         return ret;
2876 }
2877
2878 static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
2879                                       SMU7_Discrete_DpmTable *table)
2880 {
2881         u32 count;
2882         struct atom_clock_dividers dividers;
2883         int ret = -EINVAL;
2884
2885         table->SamuLevelCount =
2886                 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2887
2888         for (count = 0; count < table->SamuLevelCount; count++) {
2889                 table->SamuLevel[count].Frequency =
2890                         adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2891                 table->SamuLevel[count].MinVoltage =
2892                         adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2893                 table->SamuLevel[count].MinPhases = 1;
2894
2895                 ret = amdgpu_atombios_get_clock_dividers(adev,
2896                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2897                                                          table->SamuLevel[count].Frequency, false, &dividers);
2898                 if (ret)
2899                         return ret;
2900
2901                 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2902
2903                 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2904                 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2905         }
2906
2907         return ret;
2908 }
2909
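/*
 * Compute the MPLL register set for a target memory clock: dividers
 * from the VBIOS, bandwidth control and VCO mode, optional memory
 * spread spectrum (the CLKS/CLKV pair is derived from the nominal
 * VCO frequency), and the DLL speed/power-down bits. Values are left
 * in CPU byte order; the caller byte-swaps them.
 */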
2910 static int ci_calculate_mclk_params(struct amdgpu_device *adev,
2911                                     u32 memory_clock,
2912                                     SMU7_Discrete_MemoryLevel *mclk,
2913                                     bool strobe_mode,
2914                                     bool dll_state_on)
2915 {
2916         struct ci_power_info *pi = ci_get_pi(adev);
2917         u32  dll_cntl = pi->clock_registers.dll_cntl;
2918         u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2919         u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2920         u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2921         u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2922         u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2923         u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2924         u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2925         u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2926         struct atom_mpll_param mpll_param;
2927         int ret;
2928
2929         ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
2930         if (ret)
2931                 return ret;
2932
2933         mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
2934         mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
2935
2936         mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
2937                         MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
2938         mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
2939                 (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
2940                 (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
2941
2942         mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
2943         mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2944
2945         if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
2946                 mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
2947                                 MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
2948                 mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
2949                                 (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2950         }
2951
2952         if (pi->caps_mclk_ss_support) {
2953                 struct amdgpu_atom_ss ss;
2954                 u32 freq_nom;
2955                 u32 tmp;
2956                 u32 reference_clock = adev->clock.mpll.reference_freq;
2957
2958                 if (mpll_param.qdr == 1)
2959                         freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2960                 else
2961                         freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2962
2963                 tmp = (freq_nom / reference_clock);
2964                 tmp = tmp * tmp;
2965                 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
2966                                                      ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2967                         u32 clks = reference_clock * 5 / ss.rate;
2968                         u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2969
2970                         mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
2971                         mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
2972
2973                         mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
2974                         mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
2975                 }
2976         }
2977
2978         mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
2979         mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
2980
2981         if (dll_state_on)
2982                 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2983                         MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
2984         else
2985                 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2986                         MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
2987
2988         mclk->MclkFrequency = memory_clock;
2989         mclk->MpllFuncCntl = mpll_func_cntl;
2990         mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2991         mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2992         mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2993         mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2994         mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2995         mclk->DllCntl = dll_cntl;
2996         mclk->MpllSs1 = mpll_ss1;
2997         mclk->MpllSs2 = mpll_ss2;
2998
2999         return 0;
3000 }
3001
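/*
 * Fill one SMC memory level: minimum VDDC/VDDCI/MVDD from the
 * dependency tables, activity and hysteresis defaults, stutter/
 * strobe/EDC enables keyed off the thresholds in ci_power_info, and
 * finally the MPLL parameters; everything multi-byte is converted to
 * big-endian at the end.
 */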
3002 static int ci_populate_single_memory_level(struct amdgpu_device *adev,
3003                                            u32 memory_clock,
3004                                            SMU7_Discrete_MemoryLevel *memory_level)
3005 {
3006         struct ci_power_info *pi = ci_get_pi(adev);
3007         int ret;
3008         bool dll_state_on;
3009
3010         if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
3011                 ret = ci_get_dependency_volt_by_clk(adev,
3012                                                     &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
3013                                                     memory_clock, &memory_level->MinVddc);
3014                 if (ret)
3015                         return ret;
3016         }
3017
3018         if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
3019                 ret = ci_get_dependency_volt_by_clk(adev,
3020                                                     &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3021                                                     memory_clock, &memory_level->MinVddci);
3022                 if (ret)
3023                         return ret;
3024         }
3025
3026         if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
3027                 ret = ci_get_dependency_volt_by_clk(adev,
3028                                                     &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
3029                                                     memory_clock, &memory_level->MinMvdd);
3030                 if (ret)
3031                         return ret;
3032         }
3033
3034         memory_level->MinVddcPhases = 1;
3035
3036         if (pi->vddc_phase_shed_control)
3037                 ci_populate_phase_value_based_on_mclk(adev,
3038                                                       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3039                                                       memory_clock,
3040                                                       &memory_level->MinVddcPhases);
3041
3042         memory_level->EnabledForActivity = 1;
3043         memory_level->EnabledForThrottle = 1;
3044         memory_level->UpH = 0;
3045         memory_level->DownH = 100;
3046         memory_level->VoltageDownH = 0;
3047         memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
3048
3049         memory_level->StutterEnable = false;
3050         memory_level->StrobeEnable = false;
3051         memory_level->EdcReadEnable = false;
3052         memory_level->EdcWriteEnable = false;
3053         memory_level->RttEnable = false;
3054
3055         memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3056
3057         if (pi->mclk_stutter_mode_threshold &&
3058             (memory_clock <= pi->mclk_stutter_mode_threshold) &&
3059             (!pi->uvd_enabled) &&
3060             (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
3061             (adev->pm.dpm.new_active_crtc_count <= 2))
3062                 memory_level->StutterEnable = true;
3063
3064         if (pi->mclk_strobe_mode_threshold &&
3065             (memory_clock <= pi->mclk_strobe_mode_threshold))
3066                 memory_level->StrobeEnable = true;
3067
3068         if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
3069                 memory_level->StrobeRatio =
3070                         ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
3071                 if (pi->mclk_edc_enable_threshold &&
3072                     (memory_clock > pi->mclk_edc_enable_threshold))
3073                         memory_level->EdcReadEnable = true;
3074
3075                 if (pi->mclk_edc_wr_enable_threshold &&
3076                     (memory_clock > pi->mclk_edc_wr_enable_threshold))
3077                         memory_level->EdcWriteEnable = true;
3078
3079                 if (memory_level->StrobeEnable) {
3080                         if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
3081                             ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
3082                                 dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
3083                         else
3084                                 dll_state_on = (RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1;
3085                 } else {
3086                         dll_state_on = pi->dll_default_on;
3087                 }
3088         } else {
3089                 memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
3090                 dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
3091         }
3092
3093         ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
3094         if (ret)
3095                 return ret;
3096
3097         memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
3098         memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
3099         memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
3100         memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
3101
3102         memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
3103         memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
3104         memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
3105         memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
3106         memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
3107         memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
3108         memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
3109         memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
3110         memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
3111         memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
3112         memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
3113
3114         return 0;
3115 }
3116
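/*
 * Build the graphics and memory ACPI (idle) levels: minimum voltages
 * fall back to the pp-table minimums when no ACPI values exist, the
 * SPLL is held in reset with SCLK_MUX_SEL forced to 4 (off the PLL),
 * and the memory DLLs are reset and powered down.
 */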
3117 static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
3118                                       SMU7_Discrete_DpmTable *table)
3119 {
3120         struct ci_power_info *pi = ci_get_pi(adev);
3121         struct atom_clock_dividers dividers;
3122         SMU7_Discrete_VoltageLevel voltage_level;
3123         u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
3124         u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
3125         u32 dll_cntl = pi->clock_registers.dll_cntl;
3126         u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
3127         int ret;
3128
3129         table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
3130
3131         if (pi->acpi_vddc)
3132                 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
3133         else
3134                 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
3135
3136         table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
3137
3138         table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
3139
3140         ret = amdgpu_atombios_get_clock_dividers(adev,
3141                                                  COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3142                                                  table->ACPILevel.SclkFrequency, false, &dividers);
3143         if (ret)
3144                 return ret;
3145
3146         table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3147         table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3148         table->ACPILevel.DeepSleepDivId = 0;
3149
3150         spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
3151         spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
3152
3153         spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
3154         spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
3155
3156         table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3157         table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3158         table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3159         table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3160         table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3161         table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3162         table->ACPILevel.CcPwrDynRm = 0;
3163         table->ACPILevel.CcPwrDynRm1 = 0;
3164
3165         table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3166         table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3167         table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3168         table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3169         table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3170         table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3171         table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3172         table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3173         table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3174         table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3175         table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3176
3177         table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3178         table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3179
3180         if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3181                 if (pi->acpi_vddci)
3182                         table->MemoryACPILevel.MinVddci =
3183                                 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3184                 else
3185                         table->MemoryACPILevel.MinVddci =
3186                                 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3187         }
3188
3189         if (ci_populate_mvdd_value(adev, 0, &voltage_level))
3190                 table->MemoryACPILevel.MinMvdd = 0;
3191         else
3192                 table->MemoryACPILevel.MinMvdd =
3193                         cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3194
3195         mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
3196                 MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
3197         mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
3198                         MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3199
3200         dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
3201
3202         table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3203         table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3204         table->MemoryACPILevel.MpllAdFuncCntl =
3205                 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3206         table->MemoryACPILevel.MpllDqFuncCntl =
3207                 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3208         table->MemoryACPILevel.MpllFuncCntl =
3209                 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3210         table->MemoryACPILevel.MpllFuncCntl_1 =
3211                 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3212         table->MemoryACPILevel.MpllFuncCntl_2 =
3213                 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3214         table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3215         table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3216
3217         table->MemoryACPILevel.EnabledForThrottle = 0;
3218         table->MemoryACPILevel.EnabledForActivity = 0;
3219         table->MemoryACPILevel.UpH = 0;
3220         table->MemoryACPILevel.DownH = 100;
3221         table->MemoryACPILevel.VoltageDownH = 0;
3222         table->MemoryACPILevel.ActivityLevel =
3223                 cpu_to_be16((u16)pi->mclk_activity_target);
3224
3225         table->MemoryACPILevel.StutterEnable = false;
3226         table->MemoryACPILevel.StrobeEnable = false;
3227         table->MemoryACPILevel.EdcReadEnable = false;
3228         table->MemoryACPILevel.EdcWriteEnable = false;
3229         table->MemoryACPILevel.RttEnable = false;
3230
3231         return 0;
3232 }
3234
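/* Enable or disable ULV (ultra low voltage) via SMC message, if supported. */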
3235 static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
3236 {
3237         struct ci_power_info *pi = ci_get_pi(adev);
3238         struct ci_ulv_parm *ulv = &pi->ulv;
3239
3240         if (ulv->supported) {
3241                 if (enable)
3242                         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3243                                 0 : -EINVAL;
3244                 else
3245                         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3246                                 0 : -EINVAL;
3247         }
3248
3249         return 0;
3250 }
3251
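/*
 * Program the ULV state as a VDDC offset below the lowest sclk
 * dependency entry (the target voltage is stashed in
 * backbias_response_time). For SVID2 the offset is converted to VID
 * steps via *100/625, i.e. 6.25 mV per step if voltages are in mV
 * (e.g. a 25 mV offset becomes a VID offset of 4).
 */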
3252 static int ci_populate_ulv_level(struct amdgpu_device *adev,
3253                                  SMU7_Discrete_Ulv *state)
3254 {
3255         struct ci_power_info *pi = ci_get_pi(adev);
3256         u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
3257
3258         state->CcPwrDynRm = 0;
3259         state->CcPwrDynRm1 = 0;
3260
3261         if (ulv_voltage == 0) {
3262                 pi->ulv.supported = false;
3263                 return 0;
3264         }
3265
3266         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3267                 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3268                         state->VddcOffset = 0;
3269                 else
3270                         state->VddcOffset =
3271                                 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3272         } else {
3273                 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3274                         state->VddcOffsetVid = 0;
3275                 else
3276                         state->VddcOffsetVid = (u8)
3277                                 ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3278                                  VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3279         }
3280         state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3281
3282         state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3283         state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3284         state->VddcOffset = cpu_to_be16(state->VddcOffset);
3285
3286         return 0;
3287 }
3288
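/*
 * Compute the SPLL register set for a target engine clock: feedback
 * divider from the VBIOS with dithering enabled, plus optional engine
 * spread spectrum (CLK_S/CLK_V from the VCO frequency). As with the
 * mclk variant, values stay in CPU byte order for the caller.
 */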
3289 static int ci_calculate_sclk_params(struct amdgpu_device *adev,
3290                                     u32 engine_clock,
3291                                     SMU7_Discrete_GraphicsLevel *sclk)
3292 {
3293         struct ci_power_info *pi = ci_get_pi(adev);
3294         struct atom_clock_dividers dividers;
3295         u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3296         u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3297         u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3298         u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3299         u32 reference_clock = adev->clock.spll.reference_freq;
3300         u32 reference_divider;
3301         u32 fbdiv;
3302         int ret;
3303
3304         ret = amdgpu_atombios_get_clock_dividers(adev,
3305                                                  COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3306                                                  engine_clock, false, &dividers);
3307         if (ret)
3308                 return ret;
3309
3310         reference_divider = 1 + dividers.ref_div;
3311         fbdiv = dividers.fb_div & 0x3FFFFFF;
3312
3313         spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
3314         spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
3315         spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
3316
3317         if (pi->caps_sclk_ss_support) {
3318                 struct amdgpu_atom_ss ss;
3319                 u32 vco_freq = engine_clock * dividers.post_div;
3320
3321                 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
3322                                                      ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3323                         u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3324                         u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3325
3326                         cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
3327                         cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
3328                         cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
3329
3330                         cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
3331                         cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
3332                 }
3333         }
3334
3335         sclk->SclkFrequency = engine_clock;
3336         sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3337         sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3338         sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3339         sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
3340         sclk->SclkDid = (u8)dividers.post_divider;
3341
3342         return 0;
3343 }
3344
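/*
 * Fill one SMC graphics level: SPLL settings, minimum VDDC for the
 * engine clock, activity target, and a deep-sleep divider when sclk
 * deep sleep is supported, followed by the big-endian conversion.
 */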
3345 static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
3346                                             u32 engine_clock,
3347                                             u16 sclk_activity_level_t,
3348                                             SMU7_Discrete_GraphicsLevel *graphic_level)
3349 {
3350         struct ci_power_info *pi = ci_get_pi(adev);
3351         int ret;
3352
3353         ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
3354         if (ret)
3355                 return ret;
3356
3357         ret = ci_get_dependency_volt_by_clk(adev,
3358                                             &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3359                                             engine_clock, &graphic_level->MinVddc);
3360         if (ret)
3361                 return ret;
3362
3363         graphic_level->SclkFrequency = engine_clock;
3364
3365         graphic_level->Flags = 0;
3366         graphic_level->MinVddcPhases = 1;
3367
3368         if (pi->vddc_phase_shed_control)
3369                 ci_populate_phase_value_based_on_sclk(adev,
3370                                                       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3371                                                       engine_clock,
3372                                                       &graphic_level->MinVddcPhases);
3373
3374         graphic_level->ActivityLevel = sclk_activity_level_t;
3375
3376         graphic_level->CcPwrDynRm = 0;
3377         graphic_level->CcPwrDynRm1 = 0;
3378         graphic_level->EnabledForThrottle = 1;
3379         graphic_level->UpH = 0;
3380         graphic_level->DownH = 0;
3381         graphic_level->VoltageDownH = 0;
3382         graphic_level->PowerThrottle = 0;
3383
3384         if (pi->caps_sclk_ds)
3385                 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
3386                                                                                    CISLAND_MINIMUM_ENGINE_CLOCK);
3387
3388         graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3389
3390         graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3391         graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3392         graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3393         graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3394         graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3395         graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3396         graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3397         graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3398         graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3399         graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3400         graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3401
3402         return 0;
3403 }
3404
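/*
 * Populate every graphics level from the sclk DPM table and upload
 * the fixed-size level array to SMC RAM. Deep sleep stays enabled
 * only on the two lowest levels; the top level gets the high display
 * watermark.
 */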
3405 static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
3406 {
3407         struct ci_power_info *pi = ci_get_pi(adev);
3408         struct ci_dpm_table *dpm_table = &pi->dpm_table;
3409         u32 level_array_address = pi->dpm_table_start +
3410                 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3411         u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3412                 SMU7_MAX_LEVELS_GRAPHICS;
3413         SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3414         u32 i, ret;
3415
3416         memset(levels, 0, level_array_size);
3417
3418         for (i = 0; i < dpm_table->sclk_table.count; i++) {
3419                 ret = ci_populate_single_graphic_level(adev,
3420                                                        dpm_table->sclk_table.dpm_levels[i].value,
3421                                                        (u16)pi->activity_target[i],
3422                                                        &pi->smc_state_table.GraphicsLevel[i]);
3423                 if (ret)
3424                         return ret;
3425                 if (i > 1)
3426                         pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3427                 if (i == (dpm_table->sclk_table.count - 1))
3428                         pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3429                                 PPSMC_DISPLAY_WATERMARK_HIGH;
3430         }
3431         pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3432
3433         pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3434         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3435                 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3436
3437         ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3438                                    (u8 *)levels, level_array_size,
3439                                    pi->sram_end);
3440         if (ret)
3441                 return ret;
3442
3443         return 0;
3444 }
3445
3446 static int ci_populate_ulv_state(struct amdgpu_device *adev,
3447                                  SMU7_Discrete_Ulv *ulv_level)
3448 {
3449         return ci_populate_ulv_level(adev, ulv_level);
3450 }
3451
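/*
 * Populate every memory level from the mclk DPM table and upload the
 * array to SMC RAM. A zero mclk is rejected; on 0x67B0/0x67B1
 * (Hawaii) level 1 apparently inherits level 0's minimum VDDC as a
 * workaround, and level 0's activity target is pinned at 0x1F.
 */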
3452 static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
3453 {
3454         struct ci_power_info *pi = ci_get_pi(adev);
3455         struct ci_dpm_table *dpm_table = &pi->dpm_table;
3456         u32 level_array_address = pi->dpm_table_start +
3457                 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3458         u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3459                 SMU7_MAX_LEVELS_MEMORY;
3460         SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3461         u32 i, ret;
3462
3463         memset(levels, 0, level_array_size);
3464
3465         for (i = 0; i < dpm_table->mclk_table.count; i++) {
3466                 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3467                         return -EINVAL;
3468                 ret = ci_populate_single_memory_level(adev,
3469                                                       dpm_table->mclk_table.dpm_levels[i].value,
3470                                                       &pi->smc_state_table.MemoryLevel[i]);
3471                 if (ret)
3472                         return ret;
3473         }
3474
3475         if ((dpm_table->mclk_table.count >= 2) &&
3476             ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
3477                 pi->smc_state_table.MemoryLevel[1].MinVddc =
3478                         pi->smc_state_table.MemoryLevel[0].MinVddc;
3479                 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3480                         pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3481         }
3482
3483         pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3484
3485         pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3486         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3487                 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3488
3489         pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3490                 PPSMC_DISPLAY_WATERMARK_HIGH;
3491
3492         ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3493                                    (u8 *)levels, level_array_size,
3494                                    pi->sram_end);
3495         if (ret)
3496                 return ret;
3497
3498         return 0;
3499 }
3500
3501 static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
3502                                       struct ci_single_dpm_table *dpm_table,
3503                                       u32 count)
3504 {
3505         u32 i;
3506
3507         dpm_table->count = count;
3508         for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3509                 dpm_table->dpm_levels[i].enabled = false;
3510 }
3511
3512 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
3513                                       u32 index, u32 pcie_gen, u32 pcie_lanes)
3514 {
3515         dpm_table->dpm_levels[index].value = pcie_gen;
3516         dpm_table->dpm_levels[index].param1 = pcie_lanes;
3517         dpm_table->dpm_levels[index].enabled = true;
3518 }
3519
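/*
 * Build the six-entry default PCIe DPM table from the powersaving and
 * performance gen/lane ranges; if only one of the two sets is valid,
 * the other is cloned from it. Bonaire uses max lanes even for the
 * lowest entry.
 */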
3520 static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
3521 {
3522         struct ci_power_info *pi = ci_get_pi(adev);
3523
3524         if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3525                 return -EINVAL;
3526
3527         if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3528                 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3529                 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3530         } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3531                 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3532                 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3533         }
3534
3535         ci_reset_single_dpm_table(adev,
3536                                   &pi->dpm_table.pcie_speed_table,
3537                                   SMU7_MAX_LEVELS_LINK);
3538
3539         if (adev->asic_type == CHIP_BONAIRE)
3540                 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3541                                           pi->pcie_gen_powersaving.min,
3542                                           pi->pcie_lane_powersaving.max);
3543         else
3544                 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3545                                           pi->pcie_gen_powersaving.min,
3546                                           pi->pcie_lane_powersaving.min);
3547         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3548                                   pi->pcie_gen_performance.min,
3549                                   pi->pcie_lane_performance.min);
3550         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3551                                   pi->pcie_gen_powersaving.min,
3552                                   pi->pcie_lane_powersaving.max);
3553         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3554                                   pi->pcie_gen_performance.min,
3555                                   pi->pcie_lane_performance.max);
3556         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3557                                   pi->pcie_gen_powersaving.max,
3558                                   pi->pcie_lane_powersaving.max);
3559         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3560                                   pi->pcie_gen_performance.max,
3561                                   pi->pcie_lane_performance.max);
3562
3563         pi->dpm_table.pcie_speed_table.count = 6;
3564
3565         return 0;
3566 }
3567
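/*
 * Rebuild all DPM tables from the VBIOS dependency tables: sclk/mclk
 * levels are deduplicated against their predecessor, the voltage
 * tables are copied verbatim, the PCIe table is reset to the defaults
 * above (return value ignored, and the &...dependency pointer checks
 * below are always true), and a "golden" copy is saved for restore.
 */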
3568 static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
3569 {
3570         struct ci_power_info *pi = ci_get_pi(adev);
3571         struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3572                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3573         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
3574                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3575         struct amdgpu_cac_leakage_table *std_voltage_table =
3576                 &adev->pm.dpm.dyn_state.cac_leakage_table;
3577         u32 i;
3578
3579         if (allowed_sclk_vddc_table == NULL)
3580                 return -EINVAL;
3581         if (allowed_sclk_vddc_table->count < 1)
3582                 return -EINVAL;
3583         if (allowed_mclk_table == NULL)
3584                 return -EINVAL;
3585         if (allowed_mclk_table->count < 1)
3586                 return -EINVAL;
3587
3588         memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3589
3590         ci_reset_single_dpm_table(adev,
3591                                   &pi->dpm_table.sclk_table,
3592                                   SMU7_MAX_LEVELS_GRAPHICS);
3593         ci_reset_single_dpm_table(adev,
3594                                   &pi->dpm_table.mclk_table,
3595                                   SMU7_MAX_LEVELS_MEMORY);
3596         ci_reset_single_dpm_table(adev,
3597                                   &pi->dpm_table.vddc_table,
3598                                   SMU7_MAX_LEVELS_VDDC);
3599         ci_reset_single_dpm_table(adev,
3600                                   &pi->dpm_table.vddci_table,
3601                                   SMU7_MAX_LEVELS_VDDCI);
3602         ci_reset_single_dpm_table(adev,
3603                                   &pi->dpm_table.mvdd_table,
3604                                   SMU7_MAX_LEVELS_MVDD);
3605
3606         pi->dpm_table.sclk_table.count = 0;
3607         for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3608                 if ((i == 0) ||
3609                     (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3610                      allowed_sclk_vddc_table->entries[i].clk)) {
3611                         pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3612                                 allowed_sclk_vddc_table->entries[i].clk;
3613                         pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3614                                 (i == 0);
3615                         pi->dpm_table.sclk_table.count++;
3616                 }
3617         }
3618
3619         pi->dpm_table.mclk_table.count = 0;
3620         for (i = 0; i < allowed_mclk_table->count; i++) {
3621                 if ((i == 0) ||
3622                     (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3623                      allowed_mclk_table->entries[i].clk)) {
3624                         pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3625                                 allowed_mclk_table->entries[i].clk;
3626                         pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3627                                 (i == 0);
3628                         pi->dpm_table.mclk_table.count++;
3629                 }
3630         }
3631
3632         for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3633                 pi->dpm_table.vddc_table.dpm_levels[i].value =
3634                         allowed_sclk_vddc_table->entries[i].v;
3635                 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3636                         std_voltage_table->entries[i].leakage;
3637                 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3638         }
3639         pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3640
3641         allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3642         if (allowed_mclk_table) {
3643                 for (i = 0; i < allowed_mclk_table->count; i++) {
3644                         pi->dpm_table.vddci_table.dpm_levels[i].value =
3645                                 allowed_mclk_table->entries[i].v;
3646                         pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3647                 }
3648                 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3649         }
3650
3651         allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3652         if (allowed_mclk_table) {
3653                 for (i = 0; i < allowed_mclk_table->count; i++) {
3654                         pi->dpm_table.mvdd_table.dpm_levels[i].value =
3655                                 allowed_mclk_table->entries[i].v;
3656                         pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3657                 }
3658                 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3659         }
3660
3661         ci_setup_default_pcie_tables(adev);
3662
3663         /* save a copy of the default DPM table */
3664         memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
3665                         sizeof(struct ci_dpm_table));
3666
3667         return 0;
3668 }
3669
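/*
 * Map a VBIOS boot clock to a DPM level index; note the scan does not
 * stop early, so the last matching level wins.
 */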
3670 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3671                               u32 value, u32 *boot_level)
3672 {
3673         u32 i;
3674         int ret = -EINVAL;
3675
3676         for (i = 0; i < table->count; i++) {
3677                 if (value == table->dpm_levels[i].value) {
3678                         *boot_level = i;
3679                         ret = 0;
3680                 }
3681         }
3682
3683         return ret;
3684 }
3685
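/*
 * Capture the just-built level-0 settings as the default gfx power
 * profile, then derive the compute profile from it as described in
 * the comment below.
 */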
3686 static void ci_save_default_power_profile(struct amdgpu_device *adev)
3687 {
3688         struct ci_power_info *pi = ci_get_pi(adev);
3689         struct SMU7_Discrete_GraphicsLevel *levels =
3690                                 pi->smc_state_table.GraphicsLevel;
3691         uint32_t min_level = 0;
3692
3693         pi->default_gfx_power_profile.activity_threshold =
3694                         be16_to_cpu(levels[0].ActivityLevel);
3695         pi->default_gfx_power_profile.up_hyst = levels[0].UpH;
3696         pi->default_gfx_power_profile.down_hyst = levels[0].DownH;
3697         pi->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
3698
3699         pi->default_compute_power_profile = pi->default_gfx_power_profile;
3700         pi->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
3701
3702         /* Optimize compute power profile: Use only highest
3703          * 2 power levels (if more than 2 are available), Hysteresis:
3704          * 0ms up, 5ms down
3705          */
3706         if (pi->smc_state_table.GraphicsDpmLevelCount > 2)
3707                 min_level = pi->smc_state_table.GraphicsDpmLevelCount - 2;
3708         else if (pi->smc_state_table.GraphicsDpmLevelCount == 2)
3709                 min_level = 1;
3710         pi->default_compute_power_profile.min_sclk =
3711                         be32_to_cpu(levels[min_level].SclkFrequency);
3712
3713         pi->default_compute_power_profile.up_hyst = 0;
3714         pi->default_compute_power_profile.down_hyst = 5;
3715
3716         pi->gfx_power_profile = pi->default_gfx_power_profile;
3717         pi->compute_power_profile = pi->default_compute_power_profile;
3718 }
3719
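/*
 * Top-level SMC state table setup: build the DPM tables, populate
 * every level type (graphics, memory, link, ACPI, UVD, VCE, ACP,
 * SAMU, ULV), set boot levels/voltages (the ci_find_boot_level()
 * results are not error-checked) and the global intervals, byte-swap
 * the header fields, and upload everything up to the three trailing
 * PID controller structs to SMC RAM.
 */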
3720 static int ci_init_smc_table(struct amdgpu_device *adev)
3721 {
3722         struct ci_power_info *pi = ci_get_pi(adev);
3723         struct ci_ulv_parm *ulv = &pi->ulv;
3724         struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
3725         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3726         int ret;
3727
3728         ret = ci_setup_default_dpm_tables(adev);
3729         if (ret)
3730                 return ret;
3731
3732         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3733                 ci_populate_smc_voltage_tables(adev, table);
3734
3735         ci_init_fps_limits(adev);
3736
3737         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3738                 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3739
3740         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3741                 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3742
3743         if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
3744                 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3745
3746         if (ulv->supported) {
3747                 ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
3748                 if (ret)
3749                         return ret;
3750                 WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3751         }
3752
3753         ret = ci_populate_all_graphic_levels(adev);
3754         if (ret)
3755                 return ret;
3756
3757         ret = ci_populate_all_memory_levels(adev);
3758         if (ret)
3759                 return ret;
3760
3761         ci_populate_smc_link_level(adev, table);
3762
3763         ret = ci_populate_smc_acpi_level(adev, table);
3764         if (ret)
3765                 return ret;
3766
3767         ret = ci_populate_smc_vce_level(adev, table);
3768         if (ret)
3769                 return ret;
3770
3771         ret = ci_populate_smc_acp_level(adev, table);
3772         if (ret)
3773                 return ret;
3774
3775         ret = ci_populate_smc_samu_level(adev, table);
3776         if (ret)
3777                 return ret;
3778
3779         ret = ci_do_program_memory_timing_parameters(adev);
3780         if (ret)
3781                 return ret;
3782
3783         ret = ci_populate_smc_uvd_level(adev, table);
3784         if (ret)
3785                 return ret;
3786
3787         table->UvdBootLevel = 0;
3788         table->VceBootLevel = 0;
3789         table->AcpBootLevel = 0;
3790         table->SamuBootLevel = 0;
3791         table->GraphicsBootLevel = 0;
3792         table->MemoryBootLevel = 0;
3793
3794         ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3795                                  pi->vbios_boot_state.sclk_bootup_value,
3796                                  (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3797
3798         ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3799                                  pi->vbios_boot_state.mclk_bootup_value,
3800                                  (u32 *)&pi->smc_state_table.MemoryBootLevel);
3801
3802         table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3803         table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3804         table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3805
3806         ci_populate_smc_initial_state(adev, amdgpu_boot_state);
3807
3808         ret = ci_populate_bapm_parameters_in_dpm_table(adev);
3809         if (ret)
3810                 return ret;
3811
3812         table->UVDInterval = 1;
3813         table->VCEInterval = 1;
3814         table->ACPInterval = 1;
3815         table->SAMUInterval = 1;
3816         table->GraphicsVoltageChangeEnable = 1;
3817         table->GraphicsThermThrottleEnable = 1;
3818         table->GraphicsInterval = 1;
3819         table->VoltageInterval = 1;
3820         table->ThermalInterval = 1;
3821         table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3822                                              CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3823         table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3824                                             CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3825         table->MemoryVoltageChangeEnable = 1;
3826         table->MemoryInterval = 1;
3827         table->VoltageResponseTime = 0;
3828         table->VddcVddciDelta = 4000;
3829         table->PhaseResponseTime = 0;
3830         table->MemoryThermThrottleEnable = 1;
3831         table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3832         table->PCIeGenInterval = 1;
3833         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3834                 table->SVI2Enable = 1;
3835         else
3836                 table->SVI2Enable = 0;
3837
3838         table->ThermGpio = 17;
3839         table->SclkStepSize = 0x4000;
3840
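     	/* the SMC expects this table in big-endian byte order */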
3841         table->SystemFlags = cpu_to_be32(table->SystemFlags);
3842         table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3843         table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3844         table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3845         table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3846         table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3847         table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3848         table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3849         table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3850         table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3851         table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3852         table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3853         table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3854         table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3855
3856         ret = amdgpu_ci_copy_bytes_to_smc(adev,
3857                                    pi->dpm_table_start +
3858                                    offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3859                                    (u8 *)&table->SystemFlags,
3860                                    sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3861                                    pi->sram_end);
3862         if (ret)
3863                 return ret;
3864
3865         ci_save_default_power_profile(adev);
3866
3867         return 0;
3868 }
3869
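     /* enable only the DPM levels whose value lies within [low_limit, high_limit] */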
3870 static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
3871                                       struct ci_single_dpm_table *dpm_table,
3872                                       u32 low_limit, u32 high_limit)
3873 {
3874         u32 i;
3875
3876         for (i = 0; i < dpm_table->count; i++) {
3877                 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3878                     (dpm_table->dpm_levels[i].value > high_limit))
3879                         dpm_table->dpm_levels[i].enabled = false;
3880                 else
3881                         dpm_table->dpm_levels[i].enabled = true;
3882         }
3883 }
3884
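     /* disable PCIe levels outside the requested speed/lane window, then
      * disable duplicates (identical speed and lane count) among the rest */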
3885 static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
3886                                     u32 speed_low, u32 lanes_low,
3887                                     u32 speed_high, u32 lanes_high)
3888 {
3889         struct ci_power_info *pi = ci_get_pi(adev);
3890         struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3891         u32 i, j;
3892
3893         for (i = 0; i < pcie_table->count; i++) {
3894                 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3895                     (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3896                     (pcie_table->dpm_levels[i].value > speed_high) ||
3897                     (pcie_table->dpm_levels[i].param1 > lanes_high))
3898                         pcie_table->dpm_levels[i].enabled = false;
3899                 else
3900                         pcie_table->dpm_levels[i].enabled = true;
3901         }
3902
3903         for (i = 0; i < pcie_table->count; i++) {
3904                 if (pcie_table->dpm_levels[i].enabled) {
3905                         for (j = i + 1; j < pcie_table->count; j++) {
3906                                 if (pcie_table->dpm_levels[j].enabled) {
3907                                         if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3908                                             (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3909                                                 pcie_table->dpm_levels[j].enabled = false;
3910                                 }
3911                         }
3912                 }
3913         }
3914 }
3915
3916 static int ci_trim_dpm_states(struct amdgpu_device *adev,
3917                               struct amdgpu_ps *amdgpu_state)
3918 {
3919         struct ci_ps *state = ci_get_ps(amdgpu_state);
3920         struct ci_power_info *pi = ci_get_pi(adev);
3921         u32 high_limit_count;
3922
3923         if (state->performance_level_count < 1)
3924                 return -EINVAL;
3925
3926         if (state->performance_level_count == 1)
3927                 high_limit_count = 0;
3928         else
3929                 high_limit_count = 1;
3930
3931         ci_trim_single_dpm_states(adev,
3932                                   &pi->dpm_table.sclk_table,
3933                                   state->performance_levels[0].sclk,
3934                                   state->performance_levels[high_limit_count].sclk);
3935
3936         ci_trim_single_dpm_states(adev,
3937                                   &pi->dpm_table.mclk_table,
3938                                   state->performance_levels[0].mclk,
3939                                   state->performance_levels[high_limit_count].mclk);
3940
3941         ci_trim_pcie_dpm_states(adev,
3942                                 state->performance_levels[0].pcie_gen,
3943                                 state->performance_levels[0].pcie_lane,
3944                                 state->performance_levels[high_limit_count].pcie_gen,
3945                                 state->performance_levels[high_limit_count].pcie_lane);
3946
3947         return 0;
3948 }
3949
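     /* request the lowest VDDC from the SMC that still satisfies the current
      * display clock */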
3950 static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
3951 {
3952         struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
3953                 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3954         struct amdgpu_clock_voltage_dependency_table *vddc_table =
3955                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3956         u32 requested_voltage = 0;
3957         u32 i;
3958
3959         if (disp_voltage_table == NULL)
3960                 return -EINVAL;
3961         if (!disp_voltage_table->count)
3962                 return -EINVAL;
3963
3964         for (i = 0; i < disp_voltage_table->count; i++) {
3965                 if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3966                         requested_voltage = disp_voltage_table->entries[i].v;
3967         }
3968
3969         for (i = 0; i < vddc_table->count; i++) {
3970                 if (requested_voltage <= vddc_table->entries[i].v) {
3971                         requested_voltage = vddc_table->entries[i].v;
3972                         return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3973                                                                   PPSMC_MSG_VddC_Request,
3974                                                                   requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3975                                 0 : -EINVAL;
3976                 }
3977         }
3978
3979         return -EINVAL;
3980 }
3981
3982 static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
3983 {
3984         struct ci_power_info *pi = ci_get_pi(adev);
3985         PPSMC_Result result;
3986
3987         ci_apply_disp_minimum_voltage_request(adev);
3988
3989         if (!pi->sclk_dpm_key_disabled) {
3990                 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3991                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3992                                                                    PPSMC_MSG_SCLKDPM_SetEnabledMask,
3993                                                                    pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3994                         if (result != PPSMC_Result_OK)
3995                                 return -EINVAL;
3996                 }
3997         }
3998
3999         if (!pi->mclk_dpm_key_disabled) {
4000                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4001                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4002                                                                    PPSMC_MSG_MCLKDPM_SetEnabledMask,
4003                                                                    pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4004                         if (result != PPSMC_Result_OK)
4005                                 return -EINVAL;
4006                 }
4007         }
4008
4009 #if 0
4010         if (!pi->pcie_dpm_key_disabled) {
4011                 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4012                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4013                                                                    PPSMC_MSG_PCIeDPM_SetEnabledMask,
4014                                                                    pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4015                         if (result != PPSMC_Result_OK)
4016                                 return -EINVAL;
4017                 }
4018         }
4019 #endif
4020
4021         return 0;
4022 }
4023
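     /* flag which parts of the SMU DPM table need re-uploading:
      * DPMTABLE_OD_UPDATE_SCLK/MCLK when a requested clock is not in the
      * table, DPMTABLE_UPDATE_MCLK when the active display count changed */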
4024 static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
4025                                                    struct amdgpu_ps *amdgpu_state)
4026 {
4027         struct ci_power_info *pi = ci_get_pi(adev);
4028         struct ci_ps *state = ci_get_ps(amdgpu_state);
4029         struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
4030         u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4031         struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
4032         u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4033         u32 i;
4034
4035         pi->need_update_smu7_dpm_table = 0;
4036
4037         for (i = 0; i < sclk_table->count; i++) {
4038                 if (sclk == sclk_table->dpm_levels[i].value)
4039                         break;
4040         }
4041
4042         if (i >= sclk_table->count) {
4043                 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4044         } else {
4045                 /* XXX check display min clock requirements; the comparison below is always false, so DPMTABLE_UPDATE_SCLK is never set here */
4046                 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
4047                         pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
4048         }
4049
4050         for (i = 0; i < mclk_table->count; i++) {
4051                 if (mclk == mclk_table->dpm_levels[i].value)
4052                         break;
4053         }
4054
4055         if (i >= mclk_table->count)
4056                 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4057
4058         if (adev->pm.dpm.current_active_crtc_count !=
4059             adev->pm.dpm.new_active_crtc_count)
4060                 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4061 }
4062
4063 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
4064                                                        struct amdgpu_ps *amdgpu_state)
4065 {
4066         struct ci_power_info *pi = ci_get_pi(adev);
4067         struct ci_ps *state = ci_get_ps(amdgpu_state);
4068         u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4069         u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4070         struct ci_dpm_table *dpm_table = &pi->dpm_table;
4071         int ret;
4072
4073         if (!pi->need_update_smu7_dpm_table)
4074                 return 0;
4075
4076         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
4077                 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
4078
4079         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
4080                 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
4081
4082         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4083                 ret = ci_populate_all_graphic_levels(adev);
4084                 if (ret)
4085                         return ret;
4086         }
4087
4088         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4089                 ret = ci_populate_all_memory_levels(adev);
4090                 if (ret)
4091                         return ret;
4092         }
4093
4094         return 0;
4095 }
4096
4097 static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4098 {
4099         struct ci_power_info *pi = ci_get_pi(adev);
4100         const struct amdgpu_clock_and_voltage_limits *max_limits;
4101         int i;
4102
4103         if (adev->pm.dpm.ac_power)
4104                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4105         else
4106                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4107
4108         if (enable) {
4109                 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
4110
4111                 for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4112                         if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4113                                 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
4114
4115                                 if (!pi->caps_uvd_dpm)
4116                                         break;
4117                         }
4118                 }
4119
4120                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4121                                                   PPSMC_MSG_UVDDPM_SetEnabledMask,
4122                                                   pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
4123
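     	        /* while UVD is active, drop the lowest mclk level from the enabled mask */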
4124                 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4125                         pi->uvd_enabled = true;
4126                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4127                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4128                                                           PPSMC_MSG_MCLKDPM_SetEnabledMask,
4129                                                           pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4130                 }
4131         } else {
4132                 if (pi->uvd_enabled) {
4133                         pi->uvd_enabled = false;
4134                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4135                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4136                                                           PPSMC_MSG_MCLKDPM_SetEnabledMask,
4137                                                           pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4138                 }
4139         }
4140
4141         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4142                                    PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
4143                 0 : -EINVAL;
4144 }
4145
4146 static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
4147 {
4148         struct ci_power_info *pi = ci_get_pi(adev);
4149         const struct amdgpu_clock_and_voltage_limits *max_limits;
4150         int i;
4151
4152         if (adev->pm.dpm.ac_power)
4153                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4154         else
4155                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4156
4157         if (enable) {
4158                 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
4159                 for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4160                         if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4161                                 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4162
4163                                 if (!pi->caps_vce_dpm)
4164                                         break;
4165                         }
4166                 }
4167
4168                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4169                                                   PPSMC_MSG_VCEDPM_SetEnabledMask,
4170                                                   pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4171         }
4172
4173         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4174                                    PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4175                 0 : -EINVAL;
4176 }
4177
4178 #if 0
4179 static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
4180 {
4181         struct ci_power_info *pi = ci_get_pi(adev);
4182         const struct amdgpu_clock_and_voltage_limits *max_limits;
4183         int i;
4184
4185         if (adev->pm.dpm.ac_power)
4186                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4187         else
4188                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4189
4190         if (enable) {
4191                 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4192                 for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4193                         if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4194                                 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4195
4196                                 if (!pi->caps_samu_dpm)
4197                                         break;
4198                         }
4199                 }
4200
4201                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4202                                                   PPSMC_MSG_SAMUDPM_SetEnabledMask,
4203                                                   pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4204         }
4205         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4206                                    PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4207                 0 : -EINVAL;
4208 }
4209
4210 static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
4211 {
4212         struct ci_power_info *pi = ci_get_pi(adev);
4213         const struct amdgpu_clock_and_voltage_limits *max_limits;
4214         int i;
4215
4216         if (adev->pm.dpm.ac_power)
4217                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4218         else
4219                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4220
4221         if (enable) {
4222                 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4223                 for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4224                         if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4225                                 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4226
4227                                 if (!pi->caps_acp_dpm)
4228                                         break;
4229                         }
4230                 }
4231
4232                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4233                                                   PPSMC_MSG_ACPDPM_SetEnabledMask,
4234                                                   pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4235         }
4236
4237         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4238                                    PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4239                 0 : -EINVAL;
4240 }
4241 #endif
4242
4243 static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
4244 {
4245         struct ci_power_info *pi = ci_get_pi(adev);
4246         u32 tmp;
4247         int ret = 0;
4248
4249         if (!gate) {
4250                 /* turn the clocks on when decoding */
4251                 if (pi->caps_uvd_dpm ||
4252                     (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4253                         pi->smc_state_table.UvdBootLevel = 0;
4254                 else
4255                         pi->smc_state_table.UvdBootLevel =
4256                                 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4257
4258                 tmp = RREG32_SMC(ixDPM_TABLE_475);
4259                 tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
4260                 tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
4261                 WREG32_SMC(ixDPM_TABLE_475, tmp);
4262                 ret = ci_enable_uvd_dpm(adev, true);
4263         } else {
4264                 ret = ci_enable_uvd_dpm(adev, false);
4265                 if (ret)
4266                         return ret;
4267         }
4268
4269         return ret;
4270 }
4271
4272 static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
4273 {
4274         u8 i;
4275         u32 min_evclk = 30000; /* ??? */
4276         struct amdgpu_vce_clock_voltage_dependency_table *table =
4277                 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4278
4279         for (i = 0; i < table->count; i++) {
4280                 if (table->entries[i].evclk >= min_evclk)
4281                         return i;
4282         }
4283
4284         return table->count - 1;
4285 }
4286
4287 static int ci_update_vce_dpm(struct amdgpu_device *adev,
4288                              struct amdgpu_ps *amdgpu_new_state,
4289                              struct amdgpu_ps *amdgpu_current_state)
4290 {
4291         struct ci_power_info *pi = ci_get_pi(adev);
4292         int ret = 0;
4293         u32 tmp;
4294
4295         if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
4296                 if (amdgpu_new_state->evclk) {
4297                         pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
4298                         tmp = RREG32_SMC(ixDPM_TABLE_475);
4299                         tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
4300                         tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
4301                         WREG32_SMC(ixDPM_TABLE_475, tmp);
4302
4303                         ret = ci_enable_vce_dpm(adev, true);
4304                 } else {
4305                         ret = ci_enable_vce_dpm(adev, false);
4306                         if (ret)
4307                                 return ret;
4308                 }
4309         }
4310         return ret;
4311 }
4312
4313 #if 0
4314 static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
4315 {
4316         return ci_enable_samu_dpm(adev, gate);
4317 }
4318
4319 static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
4320 {
4321         struct ci_power_info *pi = ci_get_pi(adev);
4322         u32 tmp;
4323
4324         if (!gate) {
4325                 pi->smc_state_table.AcpBootLevel = 0;
4326
4327                 tmp = RREG32_SMC(ixDPM_TABLE_475);
4328                 tmp &= ~AcpBootLevel_MASK;
4329                 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4330                 WREG32_SMC(ixDPM_TABLE_475, tmp);
4331         }
4332
4333         return ci_enable_acp_dpm(adev, !gate);
4334 }
4335 #endif
4336
4337 static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
4338                                              struct amdgpu_ps *amdgpu_state)
4339 {
4340         struct ci_power_info *pi = ci_get_pi(adev);
4341         int ret;
4342
4343         ret = ci_trim_dpm_states(adev, amdgpu_state);
4344         if (ret)
4345                 return ret;
4346
4347         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4348                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4349         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4350                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4351         pi->last_mclk_dpm_enable_mask =
4352                 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4353         if (pi->uvd_enabled) {
4354                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4355                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4356         }
4357         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4358                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4359
4360         return 0;
4361 }
4362
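     /* return the bit position of the lowest set bit; the mask must be non-zero */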
4363 static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
4364                                        u32 level_mask)
4365 {
4366         u32 level = 0;
4367
4368         while ((level_mask & (1 << level)) == 0)
4369                 level++;
4370
4371         return level;
4372 }
4373
4374
4375 static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
4376                                           enum amd_dpm_forced_level level)
4377 {
4378         struct ci_power_info *pi = ci_get_pi(adev);
4379         u32 tmp, levels, i;
4380         int ret;
4381
4382         if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
4383                 if ((!pi->pcie_dpm_key_disabled) &&
4384                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4385                         levels = 0;
4386                         tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4387                         while (tmp >>= 1)
4388                                 levels++;
4389                         if (levels) {
4390                                 ret = ci_dpm_force_state_pcie(adev, levels);
4391                                 if (ret)
4392                                         return ret;
4393                                 for (i = 0; i < adev->usec_timeout; i++) {
4394                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4395                                         TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4396                                         TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4397                                         if (tmp == levels)
4398                                                 break;
4399                                         udelay(1);
4400                                 }
4401                         }
4402                 }
4403                 if ((!pi->sclk_dpm_key_disabled) &&
4404                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4405                         levels = 0;
4406                         tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4407                         while (tmp >>= 1)
4408                                 levels++;
4409                         if (levels) {
4410                                 ret = ci_dpm_force_state_sclk(adev, levels);
4411                                 if (ret)
4412                                         return ret;
4413                                 for (i = 0; i < adev->usec_timeout; i++) {
4414                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4415                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4416                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4417                                         if (tmp == levels)
4418                                                 break;
4419                                         udelay(1);
4420                                 }
4421                         }
4422                 }
4423                 if ((!pi->mclk_dpm_key_disabled) &&
4424                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4425                         levels = 0;
4426                         tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4427                         while (tmp >>= 1)
4428                                 levels++;
4429                         if (levels) {
4430                                 ret = ci_dpm_force_state_mclk(adev, levels);
4431                                 if (ret)
4432                                         return ret;
4433                                 for (i = 0; i < adev->usec_timeout; i++) {
4434                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4435                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4436                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4437                                         if (tmp == levels)
4438                                                 break;
4439                                         udelay(1);
4440                                 }
4441                         }
4442                 }
4443         } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
4444                 if ((!pi->sclk_dpm_key_disabled) &&
4445                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4446                         levels = ci_get_lowest_enabled_level(adev,
4447                                                              pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4448                         ret = ci_dpm_force_state_sclk(adev, levels);
4449                         if (ret)
4450                                 return ret;
4451                         for (i = 0; i < adev->usec_timeout; i++) {
4452                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4453                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4454                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4455                                 if (tmp == levels)
4456                                         break;
4457                                 udelay(1);
4458                         }
4459                 }
4460                 if ((!pi->mclk_dpm_key_disabled) &&
4461                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4462                         levels = ci_get_lowest_enabled_level(adev,
4463                                                              pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4464                         ret = ci_dpm_force_state_mclk(adev, levels);
4465                         if (ret)
4466                                 return ret;
4467                         for (i = 0; i < adev->usec_timeout; i++) {
4468                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4469                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4470                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4471                                 if (tmp == levels)
4472                                         break;
4473                                 udelay(1);
4474                         }
4475                 }
4476                 if ((!pi->pcie_dpm_key_disabled) &&
4477                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4478                         levels = ci_get_lowest_enabled_level(adev,
4479                                                              pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4480                         ret = ci_dpm_force_state_pcie(adev, levels);
4481                         if (ret)
4482                                 return ret;
4483                         for (i = 0; i < adev->usec_timeout; i++) {
4484                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4485                                 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4486                                 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4487                                 if (tmp == levels)
4488                                         break;
4489                                 udelay(1);
4490                         }
4491                 }
4492         } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
4493                 if (!pi->pcie_dpm_key_disabled) {
4494                         PPSMC_Result smc_result;
4495
4496                         smc_result = amdgpu_ci_send_msg_to_smc(adev,
4497                                                                PPSMC_MSG_PCIeDPM_UnForceLevel);
4498                         if (smc_result != PPSMC_Result_OK)
4499                                 return -EINVAL;
4500                 }
4501                 ret = ci_upload_dpm_level_enable_mask(adev);
4502                 if (ret)
4503                         return ret;
4504         }
4505
4506         adev->pm.dpm.forced_level = level;
4507
4508         return 0;
4509 }
4510
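     /* append EMRS/MRS/MRS1 (and MC_PMG_AUTO_CMD on non-GDDR5) entries,
      * derived from the copied VBIOS values, after table->last */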
4511 static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4512                                        struct ci_mc_reg_table *table)
4513 {
4514         u8 i, j, k;
4515         u32 temp_reg;
4516
4517         for (i = 0, j = table->last; i < table->last; i++) {
4518                 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4519                         return -EINVAL;
4520                 switch (table->mc_reg_address[i].s1) {
4521                 case mmMC_SEQ_MISC1:
4522                         temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
4523                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
4524                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
4525                         for (k = 0; k < table->num_entries; k++) {
4526                                 table->mc_reg_table_entry[k].mc_data[j] =
4527                                         ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4528                         }
4529                         j++;
4530                         if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4531                                 return -EINVAL;
4532
4533                         temp_reg = RREG32(mmMC_PMG_CMD_MRS);
4534                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
4535                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
4536                         for (k = 0; k < table->num_entries; k++) {
4537                                 table->mc_reg_table_entry[k].mc_data[j] =
4538                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4539                                 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
4540                                         table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4541                         }
4542                         j++;
4543                         if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4544                                 return -EINVAL;
4545
4546                         if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
4547                                 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
4548                                 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
4549                                 for (k = 0; k < table->num_entries; k++) {
4550                                         table->mc_reg_table_entry[k].mc_data[j] =
4551                                                 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4552                                 }
4553                                 j++;
4554                                 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4555                                         return -EINVAL;
4556                         }
4557                         break;
4558                 case mmMC_SEQ_RESERVE_M:
4559                         temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
4560                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
4561                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
4562                         for (k = 0; k < table->num_entries; k++) {
4563                                 table->mc_reg_table_entry[k].mc_data[j] =
4564                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4565                         }
4566                         j++;
4567                 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4568                                 return -EINVAL;
4569                         break;
4570                 default:
4571                         break;
4572                 }
4573
4574         }
4575
4576         table->last = j;
4577
4578         return 0;
4579 }
4580
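     /* map an MC register to its low-power (_LP) shadow; returns false if none exists */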
4581 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4582 {
4583         bool result = true;
4584
4585         switch (in_reg) {
4586         case mmMC_SEQ_RAS_TIMING:
4587                 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
4588                 break;
4589         case mmMC_SEQ_DLL_STBY:
4590                 *out_reg = mmMC_SEQ_DLL_STBY_LP;
4591                 break;
4592         case mmMC_SEQ_G5PDX_CMD0:
4593                 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
4594                 break;
4595         case mmMC_SEQ_G5PDX_CMD1:
4596                 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
4597                 break;
4598         case mmMC_SEQ_G5PDX_CTRL:
4599                 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
4600                 break;
4601         case mmMC_SEQ_CAS_TIMING:
4602                 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
4603                 break;
4604         case mmMC_SEQ_MISC_TIMING:
4605                 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
4606                 break;
4607         case mmMC_SEQ_MISC_TIMING2:
4608                 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
4609                 break;
4610         case mmMC_SEQ_PMG_DVS_CMD:
4611                 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
4612                 break;
4613         case mmMC_SEQ_PMG_DVS_CTL:
4614                 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
4615                 break;
4616         case mmMC_SEQ_RD_CTL_D0:
4617                 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
4618                 break;
4619         case mmMC_SEQ_RD_CTL_D1:
4620                 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
4621                 break;
4622         case mmMC_SEQ_WR_CTL_D0:
4623                 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
4624                 break;
4625         case mmMC_SEQ_WR_CTL_D1:
4626                 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
4627                 break;
4628         case mmMC_PMG_CMD_EMRS:
4629                 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
4630                 break;
4631         case mmMC_PMG_CMD_MRS:
4632                 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
4633                 break;
4634         case mmMC_PMG_CMD_MRS1:
4635                 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
4636                 break;
4637         case mmMC_SEQ_PMG_TIMING:
4638                 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
4639                 break;
4640         case mmMC_PMG_CMD_MRS2:
4641                 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
4642                 break;
4643         case mmMC_SEQ_WR_CTL_2:
4644                 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
4645                 break;
4646         default:
4647                 result = false;
4648                 break;
4649         }
4650
4651         return result;
4652 }
4653
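     /* flag the registers whose value differs between entries; only those
      * need to be reprogrammed on a memory clock change */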
4654 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4655 {
4656         u8 i, j;
4657
4658         for (i = 0; i < table->last; i++) {
4659                 for (j = 1; j < table->num_entries; j++) {
4660                         if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4661                             table->mc_reg_table_entry[j].mc_data[i]) {
4662                                 table->valid_flag |= 1 << i;
4663                                 break;
4664                         }
4665                 }
4666         }
4667 }
4668
4669 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4670 {
4671         u32 i;
4672         u16 address;
4673
4674         for (i = 0; i < table->last; i++) {
4675                 table->mc_reg_address[i].s0 =
4676                         ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4677                         address : table->mc_reg_address[i].s1;
4678         }
4679 }
4680
4681 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4682                                       struct ci_mc_reg_table *ci_table)
4683 {
4684         u8 i, j;
4685
4686         if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4687                 return -EINVAL;
4688         if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4689                 return -EINVAL;
4690
4691         for (i = 0; i < table->last; i++)
4692                 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4693
4694         ci_table->last = table->last;
4695
4696         for (i = 0; i < table->num_entries; i++) {
4697                 ci_table->mc_reg_table_entry[i].mclk_max =
4698                         table->mc_reg_table_entry[i].mclk_max;
4699                 for (j = 0; j < table->last; j++)
4700                         ci_table->mc_reg_table_entry[i].mc_data[j] =
4701                                 table->mc_reg_table_entry[i].mc_data[j];
4702         }
4703         ci_table->num_entries = table->num_entries;
4704
4705         return 0;
4706 }
4707
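     /* MC sequencer quirk for Hawaii (0x67B0/0x67B1): patch the register
      * values of the 1250 MHz and 1375 MHz entries (mclk_max is in 10 kHz) */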
4708 static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
4709                                        struct ci_mc_reg_table *table)
4710 {
4711         u8 i, k;
4712         u32 tmp;
4713         bool patch;
4714
4715         tmp = RREG32(mmMC_SEQ_MISC0);
4716         patch = ((tmp & 0x00000f00) == 0x300);
4717
4718         if (patch &&
4719             ((adev->pdev->device == 0x67B0) ||
4720              (adev->pdev->device == 0x67B1))) {
4721                 for (i = 0; i < table->last; i++) {
4722                         if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4723                                 return -EINVAL;
4724                         switch (table->mc_reg_address[i].s1) {
4725                         case mmMC_SEQ_MISC1:
4726                                 for (k = 0; k < table->num_entries; k++) {
4727                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4728                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4729                                                 table->mc_reg_table_entry[k].mc_data[i] =
4730                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4731                                                         0x00000007;
4732                                 }
4733                                 break;
4734                         case mmMC_SEQ_WR_CTL_D0:
4735                                 for (k = 0; k < table->num_entries; k++) {
4736                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4737                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4738                                                 table->mc_reg_table_entry[k].mc_data[i] =
4739                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4740                                                         0x0000D0DD;
4741                                 }
4742                                 break;
4743                         case mmMC_SEQ_WR_CTL_D1:
4744                                 for (k = 0; k < table->num_entries; k++) {
4745                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4746                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4747                                                 table->mc_reg_table_entry[k].mc_data[i] =
4748                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4749                                                         0x0000D0DD;
4750                                 }
4751                                 break;
4752                         case mmMC_SEQ_WR_CTL_2:
4753                                 for (k = 0; k < table->num_entries; k++) {
4754                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4755                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4756                                                 table->mc_reg_table_entry[k].mc_data[i] = 0;
4757                                 }
4758                                 break;
4759                         case mmMC_SEQ_CAS_TIMING:
4760                                 for (k = 0; k < table->num_entries; k++) {
4761                                         if (table->mc_reg_table_entry[k].mclk_max == 125000)
4762                                                 table->mc_reg_table_entry[k].mc_data[i] =
4763                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4764                                                         0x000C0140;
4765                                         else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4766                                                 table->mc_reg_table_entry[k].mc_data[i] =
4767                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4768                                                         0x000C0150;
4769                                 }
4770                                 break;
4771                         case mmMC_SEQ_MISC_TIMING:
4772                                 for (k = 0; k < table->num_entries; k++) {
4773                                         if (table->mc_reg_table_entry[k].mclk_max == 125000)
4774                                                 table->mc_reg_table_entry[k].mc_data[i] =
4775                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4776                                                         0x00000030;
4777                                         else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4778                                                 table->mc_reg_table_entry[k].mc_data[i] =
4779                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4780                                                         0x00000035;
4781                                 }
4782                                 break;
4783                         default:
4784                                 break;
4785                         }
4786                 }
4787
4788                 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4789                 tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
4790                 tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4791                 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4792                 WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
4793         }
4794
4795         return 0;
4796 }
4797
4798 static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
4799 {
4800         struct ci_power_info *pi = ci_get_pi(adev);
4801         struct atom_mc_reg_table *table;
4802         struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4803         u8 module_index = ci_get_memory_module_index(adev);
4804         int ret;
4805
4806         table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4807         if (!table)
4808                 return -ENOMEM;
4809
4810         WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
4811         WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
4812         WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
4813         WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
4814         WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
4815         WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
4816         WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
4817         WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
4818         WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
4819         WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
4820         WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
4821         WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
4822         WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
4823         WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
4824         WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
4825         WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
4826         WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
4827         WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
4828         WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
4829         WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
4830
4831         ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
4832         if (ret)
4833                 goto init_mc_done;
4834
4835         ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4836         if (ret)
4837                 goto init_mc_done;
4838
4839         ci_set_s0_mc_reg_index(ci_table);
4840
4841         ret = ci_register_patching_mc_seq(adev, ci_table);
4842         if (ret)
4843                 goto init_mc_done;
4844
4845         ret = ci_set_mc_special_registers(adev, ci_table);
4846         if (ret)
4847                 goto init_mc_done;
4848
4849         ci_set_valid_flag(ci_table);
4850
4851 init_mc_done:
4852         kfree(table);
4853
4854         return ret;
4855 }
4856
4857 static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
4858                                         SMU7_Discrete_MCRegisters *mc_reg_table)
4859 {
4860         struct ci_power_info *pi = ci_get_pi(adev);
4861         u32 i, j;
4862
4863         for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4864                 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4865                         if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4866                                 return -EINVAL;
4867                         mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4868                         mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4869                         i++;
4870                 }
4871         }
4872
4873         mc_reg_table->last = (u8)i;
4874
4875         return 0;
4876 }
4877
4878 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4879                                     SMU7_Discrete_MCRegisterSet *data,
4880                                     u32 num_entries, u32 valid_flag)
4881 {
4882         u32 i, j;
4883
4884         for (i = 0, j = 0; j < num_entries; j++) {
4885                 if (valid_flag & (1 << j)) {
4886                         data->value[i] = cpu_to_be32(entry->mc_data[j]);
4887                         i++;
4888                 }
4889         }
4890 }
4891
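     /* pick the first register set whose mclk_max covers memory_clock,
      * clamping to the highest entry */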
4892 static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
4893                                                  const u32 memory_clock,
4894                                                  SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4895 {
4896         struct ci_power_info *pi = ci_get_pi(adev);
4897         u32 i;
4898
4899         for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4900                 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4901                         break;
4902         }
4903
4904         if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4905                 --i;
4906
4907         ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4908                                 mc_reg_table_data, pi->mc_reg_table.last,
4909                                 pi->mc_reg_table.valid_flag);
4910 }
4911
4912 static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
4913                                            SMU7_Discrete_MCRegisters *mc_reg_table)
4914 {
4915         struct ci_power_info *pi = ci_get_pi(adev);
4916         u32 i;
4917
4918         for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4919                 ci_convert_mc_reg_table_entry_to_smc(adev,
4920                                                      pi->dpm_table.mclk_table.dpm_levels[i].value,
4921                                                      &mc_reg_table->data[i]);
4922 }
4923
4924 static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
4925 {
4926         struct ci_power_info *pi = ci_get_pi(adev);
4927         int ret;
4928
4929         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4930
4931         ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
4932         if (ret)
4933                 return ret;
4934         ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4935
4936         return amdgpu_ci_copy_bytes_to_smc(adev,
4937                                     pi->mc_reg_table_start,
4938                                     (u8 *)&pi->smc_mc_reg_table,
4939                                     sizeof(SMU7_Discrete_MCRegisters),
4940                                     pi->sram_end);
4941 }
4942
4943 static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
4944 {
4945         struct ci_power_info *pi = ci_get_pi(adev);
4946
4947         if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4948                 return 0;
4949
4950         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4951
4952         ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4953
4954         return amdgpu_ci_copy_bytes_to_smc(adev,
4955                                     pi->mc_reg_table_start +
4956                                     offsetof(SMU7_Discrete_MCRegisters, data[0]),
4957                                     (u8 *)&pi->smc_mc_reg_table.data[0],
4958                                     sizeof(SMU7_Discrete_MCRegisterSet) *
4959                                     pi->dpm_table.mclk_table.count,
4960                                     pi->sram_end);
4961 }
4962
4963 static void ci_enable_voltage_control(struct amdgpu_device *adev)
4964 {
4965         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
4966
4967         tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
4968         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
4969 }
4970
4971 static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
4972                                                       struct amdgpu_ps *amdgpu_state)
4973 {
4974         struct ci_ps *state = ci_get_ps(amdgpu_state);
4975         int i;
4976         u16 pcie_speed, max_speed = 0;
4977
4978         for (i = 0; i < state->performance_level_count; i++) {
4979                 pcie_speed = state->performance_levels[i].pcie_gen;
4980                 if (max_speed < pcie_speed)
4981                         max_speed = pcie_speed;
4982         }
4983
4984         return max_speed;
4985 }
4986
4987 static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
4988 {
4989         u32 speed_cntl = 0;
4990
4991         speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
4992                 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
4993         speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
4994
4995         return (u16)speed_cntl;
4996 }
4997
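     /* LC_LINK_WIDTH_RD is an encoded field, not a raw lane count */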
4998 static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
4999 {
5000         u32 link_width = 0;
5001
5002         link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
5003                 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
5004         link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
5005
5006         switch (link_width) {
5007         case 1:
5008                 return 1;
5009         case 2:
5010                 return 2;
5011         case 3:
5012                 return 4;
5013         case 4:
5014                 return 8;
5015         case 0:
5016         case 6:
5017         default:
5018                 return 16;
5019         }
5020 }
5021
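/*
 * If the new state wants a faster PCIe link, ask the platform (via the
 * ACPI PSPP performance-request interface) for the higher speed before
 * switching states; downgrades are only flagged here
 * (pspp_notify_required) and requested after the state change.
 */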
5022 static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
5023                                                              struct amdgpu_ps *amdgpu_new_state,
5024                                                              struct amdgpu_ps *amdgpu_current_state)
5025 {
5026         struct ci_power_info *pi = ci_get_pi(adev);
5027         enum amdgpu_pcie_gen target_link_speed =
5028                 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5029         enum amdgpu_pcie_gen current_link_speed;
5030
5031         if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
5032                 current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
5033         else
5034                 current_link_speed = pi->force_pcie_gen;
5035
5036         pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5037         pi->pspp_notify_required = false;
5038         if (target_link_speed > current_link_speed) {
5039                 switch (target_link_speed) {
5040 #ifdef CONFIG_ACPI
5041                 case AMDGPU_PCIE_GEN3:
5042                         if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
5043                                 break;
5044                         pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
5045                         if (current_link_speed == AMDGPU_PCIE_GEN2)
5046                                 break;
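                        /* fall through to try the GEN2 request */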
5047                 case AMDGPU_PCIE_GEN2:
5048                         if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
5049                                 break;
5050 #endif
5051                 default:
5052                         pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
5053                         break;
5054                 }
5055         } else {
5056                 if (target_link_speed < current_link_speed)
5057                         pi->pspp_notify_required = true;
5058         }
5059 }
5060
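/*
 * Complete a deferred PCIe downgrade: once the new (slower) state is
 * active, tell the platform the lower link speed is acceptable.
 */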
5061 static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
5062                                                            struct amdgpu_ps *amdgpu_new_state,
5063                                                            struct amdgpu_ps *amdgpu_current_state)
5064 {
5065         struct ci_power_info *pi = ci_get_pi(adev);
5066         enum amdgpu_pcie_gen target_link_speed =
5067                 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5068         u8 request;
5069
5070         if (pi->pspp_notify_required) {
5071                 if (target_link_speed == AMDGPU_PCIE_GEN3)
5072                         request = PCIE_PERF_REQ_PECI_GEN3;
5073                 else if (target_link_speed == AMDGPU_PCIE_GEN2)
5074                         request = PCIE_PERF_REQ_PECI_GEN2;
5075                 else
5076                         request = PCIE_PERF_REQ_PECI_GEN1;
5077
5078                 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5079                     (ci_get_current_pcie_speed(adev) > 0))
5080                         return;
5081
5082 #ifdef CONFIG_ACPI
5083                 amdgpu_acpi_pcie_performance_request(adev, request, false);
5084 #endif
5085         }
5086 }
5087
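/*
 * Cache the min/max VDDC/VDDCI values from the pptable dependency tables
 * and derive the AC clock/voltage limits from their highest (last)
 * entries. All three tables must exist and be non-empty.
 */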
5088 static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
5089 {
5090         struct ci_power_info *pi = ci_get_pi(adev);
5091         struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
5092                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
5093         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
5094                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
5095         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
5096                 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
5097
5098         if (allowed_sclk_vddc_table == NULL)
5099                 return -EINVAL;
5100         if (allowed_sclk_vddc_table->count < 1)
5101                 return -EINVAL;
5102         if (allowed_mclk_vddc_table == NULL)
5103                 return -EINVAL;
5104         if (allowed_mclk_vddc_table->count < 1)
5105                 return -EINVAL;
5106         if (allowed_mclk_vddci_table == NULL)
5107                 return -EINVAL;
5108         if (allowed_mclk_vddci_table->count < 1)
5109                 return -EINVAL;
5110
5111         pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
5112         pi->max_vddc_in_pp_table =
5113                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5114
5115         pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
5116         pi->max_vddci_in_pp_table =
5117                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5118
5119         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
5120                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5121         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
5122                 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
5123         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
5124                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5125         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
5126                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5127
5128         return 0;
5129 }
5130
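/*
 * If *vddc holds a leakage record ID rather than a real voltage, replace
 * it with the actual voltage measured for this part.
 */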
5131 static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
5132 {
5133         struct ci_power_info *pi = ci_get_pi(adev);
5134         struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
5135         u32 leakage_index;
5136
5137         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5138                 if (leakage_table->leakage_id[leakage_index] == *vddc) {
5139                         *vddc = leakage_table->actual_voltage[leakage_index];
5140                         break;
5141                 }
5142         }
5143 }
5144
5145 static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
5146 {
5147         struct ci_power_info *pi = ci_get_pi(adev);
5148         struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
5149         u32 leakage_index;
5150
5151         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5152                 if (leakage_table->leakage_id[leakage_index] == *vddci) {
5153                         *vddci = leakage_table->actual_voltage[leakage_index];
5154                         break;
5155                 }
5156         }
5157 }
5158
5159 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5160                                                                       struct amdgpu_clock_voltage_dependency_table *table)
5161 {
5162         u32 i;
5163
5164         if (table) {
5165                 for (i = 0; i < table->count; i++)
5166                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5167         }
5168 }
5169
5170 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
5171                                                                        struct amdgpu_clock_voltage_dependency_table *table)
5172 {
5173         u32 i;
5174
5175         if (table) {
5176                 for (i = 0; i < table->count; i++)
5177                         ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
5178         }
5179 }
5180
5181 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5182                                                                           struct amdgpu_vce_clock_voltage_dependency_table *table)
5183 {
5184         u32 i;
5185
5186         if (table) {
5187                 for (i = 0; i < table->count; i++)
5188                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5189         }
5190 }
5191
5192 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5193                                                                           struct amdgpu_uvd_clock_voltage_dependency_table *table)
5194 {
5195         u32 i;
5196
5197         if (table) {
5198                 for (i = 0; i < table->count; i++)
5199                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5200         }
5201 }
5202
5203 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
5204                                                                    struct amdgpu_phase_shedding_limits_table *table)
5205 {
5206         u32 i;
5207
5208         if (table) {
5209                 for (i = 0; i < table->count; i++)
5210                         ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
5211         }
5212 }
5213
5214 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
5215                                                             struct amdgpu_clock_and_voltage_limits *table)
5216 {
5217         if (table) {
5218                 ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
5219                 ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
5220         }
5221 }
5222
5223 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
5224                                                          struct amdgpu_cac_leakage_table *table)
5225 {
5226         u32 i;
5227
5228         if (table) {
5229                 for (i = 0; i < table->count; i++)
5230                         ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
5231         }
5232 }
5233
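/*
 * Run the leakage fixup over every clock/voltage dependency table, the
 * phase shedding limits, the AC/DC limits and the CAC leakage table.
 */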
5234 static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
5235 {
5236
5237         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5238                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5239         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5240                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5241         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5242                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5243         ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
5244                                                                    &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5245         ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
5246                                                                       &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5247         ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
5248                                                                       &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5249         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5250                                                                   &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5251         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5252                                                                   &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5253         ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
5254                                                                &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
5255         ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5256                                                         &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5257         ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5258                                                         &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5259         ci_patch_cac_leakage_table_with_vddc_leakage(adev,
5260                                                      &adev->pm.dpm.dyn_state.cac_leakage_table);
5261
5262 }
5263
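/*
 * Keep private copies of the current/requested power states (and repoint
 * ps_priv at them) so they stay valid independent of the ps array.
 */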
5264 static void ci_update_current_ps(struct amdgpu_device *adev,
5265                                  struct amdgpu_ps *rps)
5266 {
5267         struct ci_ps *new_ps = ci_get_ps(rps);
5268         struct ci_power_info *pi = ci_get_pi(adev);
5269
5270         pi->current_rps = *rps;
5271         pi->current_ps = *new_ps;
5272         pi->current_rps.ps_priv = &pi->current_ps;
5273         adev->pm.dpm.current_ps = &pi->current_rps;
5274 }
5275
5276 static void ci_update_requested_ps(struct amdgpu_device *adev,
5277                                    struct amdgpu_ps *rps)
5278 {
5279         struct ci_ps *new_ps = ci_get_ps(rps);
5280         struct ci_power_info *pi = ci_get_pi(adev);
5281
5282         pi->requested_rps = *rps;
5283         pi->requested_ps = *new_ps;
5284         pi->requested_rps.ps_priv = &pi->requested_ps;
5285         adev->pm.dpm.requested_ps = &pi->requested_rps;
5286 }
5287
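/*
 * Snapshot the requested state into pi->requested_rps and let
 * ci_apply_state_adjust_rules() clamp it before it is committed.
 */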
5288 static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
5289 {
5290         struct ci_power_info *pi = ci_get_pi(adev);
5291         struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
5292         struct amdgpu_ps *new_ps = &requested_ps;
5293
5294         ci_update_requested_ps(adev, new_ps);
5295
5296         ci_apply_state_adjust_rules(adev, &pi->requested_rps);
5297
5298         return 0;
5299 }
5300
5301 static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
5302 {
5303         struct ci_power_info *pi = ci_get_pi(adev);
5304         struct amdgpu_ps *new_ps = &pi->requested_rps;
5305
5306         ci_update_current_ps(adev, new_ps);
5307 }
5308
5309
5310 static void ci_dpm_setup_asic(struct amdgpu_device *adev)
5311 {
5312         ci_read_clock_registers(adev);
5313         ci_enable_acpi_power_management(adev);
5314         ci_init_sclk_t(adev);
5315 }
5316
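/*
 * Full DPM bring-up: voltage tables, MC arbitration and SMC firmware /
 * state tables first, then start the SMC and enable ULV, deep sleep,
 * DIDT, CAC, power containment and the thermal controller.
 */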
5317 static int ci_dpm_enable(struct amdgpu_device *adev)
5318 {
5319         struct ci_power_info *pi = ci_get_pi(adev);
5320         struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5321         int ret;
5322
5323         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5324                 ci_enable_voltage_control(adev);
5325                 ret = ci_construct_voltage_tables(adev);
5326                 if (ret) {
5327                         DRM_ERROR("ci_construct_voltage_tables failed\n");
5328                         return ret;
5329                 }
5330         }
5331         if (pi->caps_dynamic_ac_timing) {
5332                 ret = ci_initialize_mc_reg_table(adev);
5333                 if (ret)
5334                         pi->caps_dynamic_ac_timing = false;
5335         }
5336         if (pi->dynamic_ss)
5337                 ci_enable_spread_spectrum(adev, true);
5338         if (pi->thermal_protection)
5339                 ci_enable_thermal_protection(adev, true);
5340         ci_program_sstp(adev);
5341         ci_enable_display_gap(adev);
5342         ci_program_vc(adev);
5343         ret = ci_upload_firmware(adev);
5344         if (ret) {
5345                 DRM_ERROR("ci_upload_firmware failed\n");
5346                 return ret;
5347         }
5348         ret = ci_process_firmware_header(adev);
5349         if (ret) {
5350                 DRM_ERROR("ci_process_firmware_header failed\n");
5351                 return ret;
5352         }
5353         ret = ci_initial_switch_from_arb_f0_to_f1(adev);
5354         if (ret) {
5355                 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5356                 return ret;
5357         }
5358         ret = ci_init_smc_table(adev);
5359         if (ret) {
5360                 DRM_ERROR("ci_init_smc_table failed\n");
5361                 return ret;
5362         }
5363         ret = ci_init_arb_table_index(adev);
5364         if (ret) {
5365                 DRM_ERROR("ci_init_arb_table_index failed\n");
5366                 return ret;
5367         }
5368         if (pi->caps_dynamic_ac_timing) {
5369                 ret = ci_populate_initial_mc_reg_table(adev);
5370                 if (ret) {
5371                         DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5372                         return ret;
5373                 }
5374         }
5375         ret = ci_populate_pm_base(adev);
5376         if (ret) {
5377                 DRM_ERROR("ci_populate_pm_base failed\n");
5378                 return ret;
5379         }
5380         ci_dpm_start_smc(adev);
5381         ci_enable_vr_hot_gpio_interrupt(adev);
5382         ret = ci_notify_smc_display_change(adev, false);
5383         if (ret) {
5384                 DRM_ERROR("ci_notify_smc_display_change failed\n");
5385                 return ret;
5386         }
5387         ci_enable_sclk_control(adev, true);
5388         ret = ci_enable_ulv(adev, true);
5389         if (ret) {
5390                 DRM_ERROR("ci_enable_ulv failed\n");
5391                 return ret;
5392         }
5393         ret = ci_enable_ds_master_switch(adev, true);
5394         if (ret) {
5395                 DRM_ERROR("ci_enable_ds_master_switch failed\n");
5396                 return ret;
5397         }
5398         ret = ci_start_dpm(adev);
5399         if (ret) {
5400                 DRM_ERROR("ci_start_dpm failed\n");
5401                 return ret;
5402         }
5403         ret = ci_enable_didt(adev, true);
5404         if (ret) {
5405                 DRM_ERROR("ci_enable_didt failed\n");
5406                 return ret;
5407         }
5408         ret = ci_enable_smc_cac(adev, true);
5409         if (ret) {
5410                 DRM_ERROR("ci_enable_smc_cac failed\n");
5411                 return ret;
5412         }
5413         ret = ci_enable_power_containment(adev, true);
5414         if (ret) {
5415                 DRM_ERROR("ci_enable_power_containment failed\n");
5416                 return ret;
5417         }
5418
5419         ret = ci_power_control_set_level(adev);
5420         if (ret) {
5421                 DRM_ERROR("ci_power_control_set_level failed\n");
5422                 return ret;
5423         }
5424
5425         ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5426
5427         ret = ci_enable_thermal_based_sclk_dpm(adev, true);
5428         if (ret) {
5429                 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5430                 return ret;
5431         }
5432
5433         ci_thermal_start_thermal_controller(adev);
5434
5435         ci_update_current_ps(adev, boot_ps);
5436
5437         return 0;
5438 }
5439
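/*
 * Tear DPM down in roughly the reverse order of ci_dpm_enable() and fall
 * back to the boot power state; nothing to undo if the SMC is not running.
 */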
5440 static void ci_dpm_disable(struct amdgpu_device *adev)
5441 {
5442         struct ci_power_info *pi = ci_get_pi(adev);
5443         struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5444
5445         amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5446                        AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
5447         amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5448                        AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
5449
5450         ci_dpm_powergate_uvd(adev, true);
5451
5452         if (!amdgpu_ci_is_smc_running(adev))
5453                 return;
5454
5455         ci_thermal_stop_thermal_controller(adev);
5456
5457         if (pi->thermal_protection)
5458                 ci_enable_thermal_protection(adev, false);
5459         ci_enable_power_containment(adev, false);
5460         ci_enable_smc_cac(adev, false);
5461         ci_enable_didt(adev, false);
5462         ci_enable_spread_spectrum(adev, false);
5463         ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5464         ci_stop_dpm(adev);
5465         ci_enable_ds_master_switch(adev, false);
5466         ci_enable_ulv(adev, false);
5467         ci_clear_vc(adev);
5468         ci_reset_to_default(adev);
5469         ci_dpm_stop_smc(adev);
5470         ci_force_switch_to_arb_f0(adev);
5471         ci_enable_thermal_based_sclk_dpm(adev, false);
5472
5473         ci_update_current_ps(adev, boot_ps);
5474 }
5475
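/*
 * Switch to the requested power state: freeze SCLK/MCLK DPM, upload the
 * new levels and enable masks, handle VCE and MC register table updates,
 * then unfreeze. PCIe speed changes are requested before / notified after.
 */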
5476 static int ci_dpm_set_power_state(struct amdgpu_device *adev)
5477 {
5478         struct ci_power_info *pi = ci_get_pi(adev);
5479         struct amdgpu_ps *new_ps = &pi->requested_rps;
5480         struct amdgpu_ps *old_ps = &pi->current_rps;
5481         int ret;
5482
5483         ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
5484         if (pi->pcie_performance_request)
5485                 ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
5486         ret = ci_freeze_sclk_mclk_dpm(adev);
5487         if (ret) {
5488                 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5489                 return ret;
5490         }
5491         ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
5492         if (ret) {
5493                 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5494                 return ret;
5495         }
5496         ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
5497         if (ret) {
5498                 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5499                 return ret;
5500         }
5501
5502         ret = ci_update_vce_dpm(adev, new_ps, old_ps);
5503         if (ret) {
5504                 DRM_ERROR("ci_update_vce_dpm failed\n");
5505                 return ret;
5506         }
5507
5508         ret = ci_update_sclk_t(adev);
5509         if (ret) {
5510                 DRM_ERROR("ci_update_sclk_t failed\n");
5511                 return ret;
5512         }
5513         if (pi->caps_dynamic_ac_timing) {
5514                 ret = ci_update_and_upload_mc_reg_table(adev);
5515                 if (ret) {
5516                         DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5517                         return ret;
5518                 }
5519         }
5520         ret = ci_program_memory_timing_parameters(adev);
5521         if (ret) {
5522                 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5523                 return ret;
5524         }
5525         ret = ci_unfreeze_sclk_mclk_dpm(adev);
5526         if (ret) {
5527                 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5528                 return ret;
5529         }
5530         ret = ci_upload_dpm_level_enable_mask(adev);
5531         if (ret) {
5532                 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5533                 return ret;
5534         }
5535         if (pi->pcie_performance_request)
5536                 ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
5537
5538         return 0;
5539 }
5540
5541 #if 0
5542 static void ci_dpm_reset_asic(struct amdgpu_device *adev)
5543 {
5544         ci_set_boot_state(adev);
5545 }
5546 #endif
5547
5548 static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
5549 {
5550         ci_program_display_gap(adev);
5551 }
5552
5553 union power_info {
5554         struct _ATOM_POWERPLAY_INFO info;
5555         struct _ATOM_POWERPLAY_INFO_V2 info_2;
5556         struct _ATOM_POWERPLAY_INFO_V3 info_3;
5557         struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5558         struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5559         struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5560 };
5561
5562 union pplib_clock_info {
5563         struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5564         struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5565         struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5566         struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5567         struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5568         struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5569 };
5570
5571 union pplib_power_state {
5572         struct _ATOM_PPLIB_STATE v1;
5573         struct _ATOM_PPLIB_STATE_V2 v2;
5574 };
5575
5576 static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
5577                                           struct amdgpu_ps *rps,
5578                                           struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5579                                           u8 table_rev)
5580 {
5581         rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5582         rps->class = le16_to_cpu(non_clock_info->usClassification);
5583         rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5584
5585         if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5586                 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5587                 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5588         } else {
5589                 rps->vclk = 0;
5590                 rps->dclk = 0;
5591         }
5592
5593         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5594                 adev->pm.dpm.boot_ps = rps;
5595         if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5596                 adev->pm.dpm.uvd_ps = rps;
5597 }
5598
5599 static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
5600                                       struct amdgpu_ps *rps, int index,
5601                                       union pplib_clock_info *clock_info)
5602 {
5603         struct ci_power_info *pi = ci_get_pi(adev);
5604         struct ci_ps *ps = ci_get_ps(rps);
5605         struct ci_pl *pl = &ps->performance_levels[index];
5606
5607         ps->performance_level_count = index + 1;
5608
5609         pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5610         pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5611         pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5612         pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5613
5614         pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
5615                                                    pi->sys_pcie_mask,
5616                                                    pi->vbios_boot_state.pcie_gen_bootup_value,
5617                                                    clock_info->ci.ucPCIEGen);
5618         pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
5619                                                      pi->vbios_boot_state.pcie_lane_bootup_value,
5620                                                      le16_to_cpu(clock_info->ci.usPCIELane));
5621
5622         if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5623                 pi->acpi_pcie_gen = pl->pcie_gen;
5624         }
5625
5626         if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5627                 pi->ulv.supported = true;
5628                 pi->ulv.pl = *pl;
5629                 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5630         }
5631
5632         /* patch up boot state */
5633         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5634                 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5635                 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5636                 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5637                 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5638         }
5639
5640         switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5641         case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5642                 pi->use_pcie_powersaving_levels = true;
5643                 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5644                         pi->pcie_gen_powersaving.max = pl->pcie_gen;
5645                 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5646                         pi->pcie_gen_powersaving.min = pl->pcie_gen;
5647                 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5648                         pi->pcie_lane_powersaving.max = pl->pcie_lane;
5649                 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5650                         pi->pcie_lane_powersaving.min = pl->pcie_lane;
5651                 break;
5652         case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5653                 pi->use_pcie_performance_levels = true;
5654                 if (pi->pcie_gen_performance.max < pl->pcie_gen)
5655                         pi->pcie_gen_performance.max = pl->pcie_gen;
5656                 if (pi->pcie_gen_performance.min > pl->pcie_gen)
5657                         pi->pcie_gen_performance.min = pl->pcie_gen;
5658                 if (pi->pcie_lane_performance.max < pl->pcie_lane)
5659                         pi->pcie_lane_performance.max = pl->pcie_lane;
5660                 if (pi->pcie_lane_performance.min > pl->pcie_lane)
5661                         pi->pcie_lane_performance.min = pl->pcie_lane;
5662                 break;
5663         default:
5664                 break;
5665         }
5666 }
5667
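/*
 * Parse the ATOM PowerPlay table into amdgpu_ps/ci_ps structures and
 * record the SCLK/MCLK pairs used by the VCE states.
 */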
5668 static int ci_parse_power_table(struct amdgpu_device *adev)
5669 {
5670         struct amdgpu_mode_info *mode_info = &adev->mode_info;
5671         struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5672         union pplib_power_state *power_state;
5673         int i, j, k, non_clock_array_index, clock_array_index;
5674         union pplib_clock_info *clock_info;
5675         struct _StateArray *state_array;
5676         struct _ClockInfoArray *clock_info_array;
5677         struct _NonClockInfoArray *non_clock_info_array;
5678         union power_info *power_info;
5679         int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5680         u16 data_offset;
5681         u8 frev, crev;
5682         u8 *power_state_offset;
5683         struct ci_ps *ps;
5684
5685         if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5686                                    &frev, &crev, &data_offset))
5687                 return -EINVAL;
5688         power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5689
5690         amdgpu_add_thermal_controller(adev);
5691
5692         state_array = (struct _StateArray *)
5693                 (mode_info->atom_context->bios + data_offset +
5694                  le16_to_cpu(power_info->pplib.usStateArrayOffset));
5695         clock_info_array = (struct _ClockInfoArray *)
5696                 (mode_info->atom_context->bios + data_offset +
5697                  le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5698         non_clock_info_array = (struct _NonClockInfoArray *)
5699                 (mode_info->atom_context->bios + data_offset +
5700                  le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5701
5702         adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
5703                                   state_array->ucNumEntries, GFP_KERNEL);
5704         if (!adev->pm.dpm.ps)
5705                 return -ENOMEM;
5706         power_state_offset = (u8 *)state_array->states;
5707         for (i = 0; i < state_array->ucNumEntries; i++) {
5708                 u8 *idx;
5709                 power_state = (union pplib_power_state *)power_state_offset;
5710                 non_clock_array_index = power_state->v2.nonClockInfoIndex;
5711                 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5712                         &non_clock_info_array->nonClockInfo[non_clock_array_index];
5713                 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5714                 if (ps == NULL) {
5715                         kfree(adev->pm.dpm.ps);
5716                         return -ENOMEM;
5717                 }
5718                 adev->pm.dpm.ps[i].ps_priv = ps;
5719                 ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
5720                                               non_clock_info,
5721                                               non_clock_info_array->ucEntrySize);
5722                 k = 0;
5723                 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5724                 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5725                         clock_array_index = idx[j];
5726                         if (clock_array_index >= clock_info_array->ucNumEntries)
5727                                 continue;
5728                         if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5729                                 break;
5730                         clock_info = (union pplib_clock_info *)
5731                                 ((u8 *)&clock_info_array->clockInfo[0] +
5732                                  (clock_array_index * clock_info_array->ucEntrySize));
5733                         ci_parse_pplib_clock_info(adev,
5734                                                   &adev->pm.dpm.ps[i], k,
5735                                                   clock_info);
5736                         k++;
5737                 }
5738                 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5739         }
5740         adev->pm.dpm.num_ps = state_array->ucNumEntries;
5741
5742         /* fill in the vce power states */
5743         for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
5744                 u32 sclk, mclk;
5745                 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
5746                 clock_info = (union pplib_clock_info *)
5747                         &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5748                 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5749                 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5750                 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5751                 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5752                 adev->pm.dpm.vce_states[i].sclk = sclk;
5753                 adev->pm.dpm.vce_states[i].mclk = mclk;
5754         }
5755
5756         return 0;
5757 }
5758
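/*
 * Read the bootup voltages and clocks from the ATOM FirmwareInfo table;
 * the boot PCIe gen/lane values are sampled from the hardware instead.
 */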
5759 static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
5760                                     struct ci_vbios_boot_state *boot_state)
5761 {
5762         struct amdgpu_mode_info *mode_info = &adev->mode_info;
5763         int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5764         ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5765         u8 frev, crev;
5766         u16 data_offset;
5767
5768         if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5769                                    &frev, &crev, &data_offset)) {
5770                 firmware_info =
5771                         (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5772                                                     data_offset);
5773                 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5774                 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5775                 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5776                 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
5777                 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
5778                 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5779                 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5780
5781                 return 0;
5782         }
5783         return -EINVAL;
5784 }
5785
5786 static void ci_dpm_fini(struct amdgpu_device *adev)
5787 {
5788         int i;
5789
5790         for (i = 0; i < adev->pm.dpm.num_ps; i++) {
5791                 kfree(adev->pm.dpm.ps[i].ps_priv);
5792         }
5793         kfree(adev->pm.dpm.ps);
5794         kfree(adev->pm.dpm.priv);
5795         kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5796         amdgpu_free_extended_power_table(adev);
5797 }
5798
5799 /**
5800  * ci_dpm_init_microcode - load ucode images from disk
5801  *
5802  * @adev: amdgpu_device pointer
5803  *
5804  * Use the firmware interface to load the ucode images into
5805  * the driver (not loaded into hw).
5806  * Returns 0 on success, error on failure.
5807  */
5808 static int ci_dpm_init_microcode(struct amdgpu_device *adev)
5809 {
5810         const char *chip_name;
5811         char fw_name[30];
5812         int err;
5813
5814         DRM_DEBUG("\n");
5815
5816         switch (adev->asic_type) {
5817         case CHIP_BONAIRE:
5818                 if ((adev->pdev->revision == 0x80) ||
5819                     (adev->pdev->revision == 0x81) ||
5820                     (adev->pdev->device == 0x665f))
5821                         chip_name = "bonaire_k";
5822                 else
5823                         chip_name = "bonaire";
5824                 break;
5825         case CHIP_HAWAII:
5826                 if (adev->pdev->revision == 0x80)
5827                         chip_name = "hawaii_k";
5828                 else
5829                         chip_name = "hawaii";
5830                 break;
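        /* CIK APUs (Kaveri/Kabini/Mullins) are handled by kv_dpm, not here */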
5831         case CHIP_KAVERI:
5832         case CHIP_KABINI:
5833         case CHIP_MULLINS:
5834         default:
                 BUG();
5835         }
5836
5837         snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
5838         err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
5839         if (err)
5840                 goto out;
5841         err = amdgpu_ucode_validate(adev->pm.fw);
5842
5843 out:
5844         if (err) {
5845                 pr_err("cik_smc: Failed to load firmware \"%s\"\n", fw_name);
5846                 release_firmware(adev->pm.fw);
5847                 adev->pm.fw = NULL;
5848         }
5849         return err;
5850 }
5851
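/*
 * One-time DPM software setup: allocate ci_power_info, parse the vbios
 * power tables, apply leakage fixups, pick the voltage control methods
 * and program the power-related GPIOs.
 */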
5852 static int ci_dpm_init(struct amdgpu_device *adev)
5853 {
5854         int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5855         SMU7_Discrete_DpmTable *dpm_table;
5856         struct amdgpu_gpio_rec gpio;
5857         u16 data_offset, size;
5858         u8 frev, crev;
5859         struct ci_power_info *pi;
5860         int ret;
5861
5862         pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5863         if (pi == NULL)
5864                 return -ENOMEM;
5865         adev->pm.dpm.priv = pi;
5866
5867         pi->sys_pcie_mask =
5868                 (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
5869                 CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
5870
5871         pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5872
5873         pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
5874         pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
5875         pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
5876         pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
5877
5878         pi->pcie_lane_performance.max = 0;
5879         pi->pcie_lane_performance.min = 16;
5880         pi->pcie_lane_powersaving.max = 0;
5881         pi->pcie_lane_powersaving.min = 16;
5882
5883         ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
5884         if (ret) {
5885                 ci_dpm_fini(adev);
5886                 return ret;
5887         }
5888
5889         ret = amdgpu_get_platform_caps(adev);
5890         if (ret) {
5891                 ci_dpm_fini(adev);
5892                 return ret;
5893         }
5894
5895         ret = amdgpu_parse_extended_power_table(adev);
5896         if (ret) {
5897                 ci_dpm_fini(adev);
5898                 return ret;
5899         }
5900
5901         ret = ci_parse_power_table(adev);
5902         if (ret) {
5903                 ci_dpm_fini(adev);
5904                 return ret;
5905         }
5906
5907         pi->dll_default_on = false;
5908         pi->sram_end = SMC_RAM_END;
5909
5910         pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5911         pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5912         pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5913         pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5914         pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5915         pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5916         pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5917         pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5918
5919         pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5920
5921         pi->sclk_dpm_key_disabled = 0;
5922         pi->mclk_dpm_key_disabled = 0;
5923         pi->pcie_dpm_key_disabled = 0;
5924         pi->thermal_sclk_dpm_enabled = 0;
5925
5926         if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
5927                 pi->caps_sclk_ds = true;
5928         else
5929                 pi->caps_sclk_ds = false;
5930
5931         pi->mclk_strobe_mode_threshold = 40000;
5932         pi->mclk_stutter_mode_threshold = 40000;
5933         pi->mclk_edc_enable_threshold = 40000;
5934         pi->mclk_edc_wr_enable_threshold = 40000;
5935
5936         ci_initialize_powertune_defaults(adev);
5937
5938         pi->caps_fps = false;
5939
5940         pi->caps_sclk_throttle_low_notification = false;
5941
5942         pi->caps_uvd_dpm = true;
5943         pi->caps_vce_dpm = true;
5944
5945         ci_get_leakage_voltages(adev);
5946         ci_patch_dependency_tables_with_leakage(adev);
5947         ci_set_private_data_variables_based_on_pptable(adev);
5948
5949         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5950                 kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
5951         if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5952                 ci_dpm_fini(adev);
5953                 return -ENOMEM;
5954         }
5955         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5956         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5957         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5958         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5959         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5960         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5961         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5962         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5963         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5964
5965         adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5966         adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5967         adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5968
5969         adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5970         adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5971         adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5972         adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5973
5974         if (adev->asic_type == CHIP_HAWAII) {
5975                 pi->thermal_temp_setting.temperature_low = 94500;
5976                 pi->thermal_temp_setting.temperature_high = 95000;
5977                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5978         } else {
5979                 pi->thermal_temp_setting.temperature_low = 99500;
5980                 pi->thermal_temp_setting.temperature_high = 100000;
5981                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5982         }
5983
5984         pi->uvd_enabled = false;
5985
5986         dpm_table = &pi->smc_state_table;
5987
5988         gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
5989         if (gpio.valid) {
5990                 dpm_table->VRHotGpio = gpio.shift;
5991                 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5992         } else {
5993                 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5994                 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5995         }
5996
5997         gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
5998         if (gpio.valid) {
5999                 dpm_table->AcDcGpio = gpio.shift;
6000                 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
6001         } else {
6002                 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
6003                 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
6004         }
6005
6006         gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
6007         if (gpio.valid) {
6008                 u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
6009
6010                 switch (gpio.shift) {
6011                 case 0:
6012                         tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
6013                         tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
6014                         break;
6015                 case 1:
6016                         tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
6017                         tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
6018                         break;
6019                 case 2:
6020                         tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
6021                         break;
6022                 case 3:
6023                         tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
6024                         break;
6025                 case 4:
6026                         tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
6027                         break;
6028                 default:
6029                         DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift);
6030                         break;
6031                 }
6032                 WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
6033         }
6034
6035         pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6036         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6037         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6038         if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
6039                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6040         else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
6041                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6042
6043         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
6044                 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
6045                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6046                 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
6047                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6048                 else
6049                         adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
6050         }
6051
6052         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
6053                 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
6054                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6055                 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
6056                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6057                 else
6058                         adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
6059         }
6060
6061         pi->vddc_phase_shed_control = true;
6062
6063 #if defined(CONFIG_ACPI)
6064         pi->pcie_performance_request =
6065                 amdgpu_acpi_is_pcie_performance_request_supported(adev);
6066 #else
6067         pi->pcie_performance_request = false;
6068 #endif
6069
6070         if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
6071                                    &frev, &crev, &data_offset)) {
6072                 pi->caps_sclk_ss_support = true;
6073                 pi->caps_mclk_ss_support = true;
6074                 pi->dynamic_ss = true;
6075         } else {
6076                 pi->caps_sclk_ss_support = false;
6077                 pi->caps_mclk_ss_support = false;
6078                 pi->dynamic_ss = true;
6079         }
6080
6081         if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
6082                 pi->thermal_protection = true;
6083         else
6084                 pi->thermal_protection = false;
6085
6086         pi->caps_dynamic_ac_timing = true;
6087
6088         pi->uvd_power_gated = true;
6089
6090         /* make sure dc limits are valid */
6091         if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
6092             (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
6093                 adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
6094                         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
6095
6096         pi->fan_ctrl_is_in_default_mode = true;
6097
6098         return 0;
6099 }
6100
6101 static void
6102 ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
6103                                                struct seq_file *m)
6104 {
6105         struct ci_power_info *pi = ci_get_pi(adev);
6106         struct amdgpu_ps *rps = &pi->current_rps;
6107         u32 sclk = ci_get_average_sclk_freq(adev);
6108         u32 mclk = ci_get_average_mclk_freq(adev);
6109         u32 activity_percent = 50;
6110         int ret;
6111
6112         ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
6113                                         &activity_percent);
6114
6115         if (ret == 0) {
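                /* value appears to be 8.8 fixed point: round, then clamp to 100% */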
6116                 activity_percent += 0x80;
6117                 activity_percent >>= 8;
6118                 activity_percent = activity_percent > 100 ? 100 : activity_percent;
6119         }
6120
6121         seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
6122         seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
6123         seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
6124                    sclk, mclk);
6125         seq_printf(m, "GPU load: %u %%\n", activity_percent);
6126 }
6127
6128 static void ci_dpm_print_power_state(struct amdgpu_device *adev,
6129                                      struct amdgpu_ps *rps)
6130 {
6131         struct ci_ps *ps = ci_get_ps(rps);
6132         struct ci_pl *pl;
6133         int i;
6134
6135         amdgpu_dpm_print_class_info(rps->class, rps->class2);
6136         amdgpu_dpm_print_cap_info(rps->caps);
6137         printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
6138         for (i = 0; i < ps->performance_level_count; i++) {
6139                 pl = &ps->performance_levels[i];
6140                 printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
6141                        i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
6142         }
6143         amdgpu_dpm_print_ps_status(adev, rps);
6144 }
6145
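/*
 * Two states are considered equal when every performance level matches;
 * the UVD/VCE clocks break the tie (see ci_check_state_equal() below).
 */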
6146 static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
6147                                                 const struct ci_pl *ci_cpl2)
6148 {
6149         return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
6150                   (ci_cpl1->sclk == ci_cpl2->sclk) &&
6151                   (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
6152                   (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
6153 }
6154
6155 static int ci_check_state_equal(struct amdgpu_device *adev,
6156                                 struct amdgpu_ps *cps,
6157                                 struct amdgpu_ps *rps,
6158                                 bool *equal)
6159 {
6160         struct ci_ps *ci_cps;
6161         struct ci_ps *ci_rps;
6162         int i;
6163
6164         if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
6165                 return -EINVAL;
6166
6167         ci_cps = ci_get_ps(cps);
6168         ci_rps = ci_get_ps(rps);
6169
6170         if (ci_cps == NULL) {
6171                 *equal = false;
6172                 return 0;
6173         }
6174
6175         if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
6176
6177                 *equal = false;
6178                 return 0;
6179         }
6180
6181         for (i = 0; i < ci_cps->performance_level_count; i++) {
6182                 if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
6183                                         &(ci_rps->performance_levels[i]))) {
6184                         *equal = false;
6185                         return 0;
6186                 }
6187         }
6188
6189         /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
6190         *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
6191         *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
6192
6193         return 0;
6194 }
6195
6196 static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
6197 {
6198         struct ci_power_info *pi = ci_get_pi(adev);
6199         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6200
6201         if (low)
6202                 return requested_state->performance_levels[0].sclk;
6203         else
6204                 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
6205 }
6206
6207 static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
6208 {
6209         struct ci_power_info *pi = ci_get_pi(adev);
6210         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6211
6212         if (low)
6213                 return requested_state->performance_levels[0].mclk;
6214         else
6215                 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6216 }
6217
6218 /* get temperature in millidegrees */
6219 static int ci_dpm_get_temp(struct amdgpu_device *adev)
6220 {
6221         u32 temp;
6222         int actual_temp = 0;
6223
6224         temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
6225                 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
6226
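        /* bit 9 presumably flags an out-of-range reading; clamp to 255C */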
6227         if (temp & 0x200)
6228                 actual_temp = 255;
6229         else
6230                 actual_temp = temp & 0x1ff;
6231
6232         actual_temp = actual_temp * 1000;
6233
6234         return actual_temp;
6235 }
6236
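/*
 * Reprogram the thermal trip points, with alerts disabled around the
 * update, apparently to avoid spurious trips while the range changes.
 */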
6237 static int ci_set_temperature_range(struct amdgpu_device *adev)
6238 {
6239         int ret;
6240
6241         ret = ci_thermal_enable_alert(adev, false);
6242         if (ret)
6243                 return ret;
6244         ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
6245                                                CISLANDS_TEMP_RANGE_MAX);
6246         if (ret)
6247                 return ret;
6248         ret = ci_thermal_enable_alert(adev, true);
6249         if (ret)
6250                 return ret;
6251         return ret;
6252 }
6253
6254 static int ci_dpm_early_init(void *handle)
6255 {
6256         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6257
6258         ci_dpm_set_dpm_funcs(adev);
6259         ci_dpm_set_irq_funcs(adev);
6260
6261         return 0;
6262 }
6263
6264 static int ci_dpm_late_init(void *handle)
6265 {
6266         int ret;
6267         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6268
6269         if (!amdgpu_dpm)
6270                 return 0;
6271
6272         /* init the sysfs and debugfs files late */
6273         ret = amdgpu_pm_sysfs_init(adev);
6274         if (ret)
6275                 return ret;
6276
6277         ret = ci_set_temperature_range(adev);
6278         if (ret)
6279                 return ret;
6280
6281         return 0;
6282 }
6283
6284 static int ci_dpm_sw_init(void *handle)
6285 {
6286         int ret;
6287         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6288
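        /* CIK thermal interrupt sources: 230 = low-to-high trip, 231 = high-to-low */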
6289         ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
6290                                 &adev->pm.dpm.thermal.irq);
6291         if (ret)
6292                 return ret;
6293
6294         ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
6295                                 &adev->pm.dpm.thermal.irq);
6296         if (ret)
6297                 return ret;
6298
6299         /* default to balanced state */
6300         adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
6301         adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
6302         adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
6303         adev->pm.default_sclk = adev->clock.default_sclk;
6304         adev->pm.default_mclk = adev->clock.default_mclk;
6305         adev->pm.current_sclk = adev->clock.default_sclk;
6306         adev->pm.current_mclk = adev->clock.default_mclk;
6307         adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
6308
6309         ret = ci_dpm_init_microcode(adev);
6310         if (ret)
6311                 return ret;
6312
6313         if (amdgpu_dpm == 0)
6314                 return 0;
6315
6316         INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
6317         mutex_lock(&adev->pm.mutex);
6318         ret = ci_dpm_init(adev);
6319         if (ret)
6320                 goto dpm_failed;
6321         adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6322         if (amdgpu_dpm == 1)
6323                 amdgpu_pm_print_power_states(adev);
6324         mutex_unlock(&adev->pm.mutex);
6325         DRM_INFO("amdgpu: dpm initialized\n");
6326
6327         return 0;
6328
6329 dpm_failed:
6330         ci_dpm_fini(adev);
6331         mutex_unlock(&adev->pm.mutex);
6332         DRM_ERROR("amdgpu: dpm initialization failed\n");
6333         return ret;
6334 }
6335
6336 static int ci_dpm_sw_fini(void *handle)
6337 {
6338         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6339
6340         flush_work(&adev->pm.dpm.thermal.work);
6341
6342         mutex_lock(&adev->pm.mutex);
6343         amdgpu_pm_sysfs_fini(adev);
6344         ci_dpm_fini(adev);
6345         mutex_unlock(&adev->pm.mutex);
6346
6347         release_firmware(adev->pm.fw);
6348         adev->pm.fw = NULL;
6349
6350         return 0;
6351 }
6352
6353 static int ci_dpm_hw_init(void *handle)
6354 {
6355         int ret;
6356         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6358
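        /* even with dpm disabled, the SMC firmware still has to be loaded and started */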
6359         if (!amdgpu_dpm) {
6360                 ret = ci_upload_firmware(adev);
6361                 if (ret) {
6362                         DRM_ERROR("ci_upload_firmware failed\n");
6363                         return ret;
6364                 }
6365                 ci_dpm_start_smc(adev);
6366                 return 0;
6367         }
6368
6369         mutex_lock(&adev->pm.mutex);
6370         ci_dpm_setup_asic(adev);
6371         ret = ci_dpm_enable(adev);
6372         if (ret)
6373                 adev->pm.dpm_enabled = false;
6374         else
6375                 adev->pm.dpm_enabled = true;
6376         mutex_unlock(&adev->pm.mutex);
6377
6378         return ret;
6379 }
6380
6381 static int ci_dpm_hw_fini(void *handle)
6382 {
6383         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6384
6385         if (adev->pm.dpm_enabled) {
6386                 mutex_lock(&adev->pm.mutex);
6387                 ci_dpm_disable(adev);
6388                 mutex_unlock(&adev->pm.mutex);
6389         } else {
6390                 ci_dpm_stop_smc(adev);
6391         }
6392
6393         return 0;
6394 }
6395
6396 static int ci_dpm_suspend(void *handle)
6397 {
6398         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6399
6400         if (adev->pm.dpm_enabled) {
6401                 mutex_lock(&adev->pm.mutex);
6402                 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
6403                                AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
6404                 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
6405                                AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
6406                 adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
6407                 adev->pm.dpm.last_state = adev->pm.dpm.state;
6408                 adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
6409                 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
6410                 mutex_unlock(&adev->pm.mutex);
6411                 amdgpu_pm_compute_clocks(adev);
6413         }
6414
6415         return 0;
6416 }
6417
6418 static int ci_dpm_resume(void *handle)
6419 {
6420         int ret;
6421         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6422
6423         if (adev->pm.dpm_enabled) {
6424                 /* asic init will reset to the boot state */
6425                 mutex_lock(&adev->pm.mutex);
6426                 ci_dpm_setup_asic(adev);
6427                 ret = ci_dpm_enable(adev);
6428                 if (ret)
6429                         adev->pm.dpm_enabled = false;
6430                 else
6431                         adev->pm.dpm_enabled = true;
6432                 adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
6433                 adev->pm.dpm.state = adev->pm.dpm.last_state;
6434                 mutex_unlock(&adev->pm.mutex);
6435                 if (adev->pm.dpm_enabled)
6436                         amdgpu_pm_compute_clocks(adev);
6437         }
6438         return 0;
6439 }
6440
6441 static bool ci_dpm_is_idle(void *handle)
6442 {
6443         /* XXX */
6444         return true;
6445 }
6446
6447 static int ci_dpm_wait_for_idle(void *handle)
6448 {
6449         /* XXX */
6450         return 0;
6451 }
6452
6453 static int ci_dpm_soft_reset(void *handle)
6454 {
6455         return 0;
6456 }
6457
6458 static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
6459                                       struct amdgpu_irq_src *source,
6460                                       unsigned type,
6461                                       enum amdgpu_interrupt_state state)
6462 {
6463         u32 cg_thermal_int;
6464
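        /* mask (disable) or unmask (enable) the high and low thermal interrupt lines */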
6465         switch (type) {
6466         case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
6467                 switch (state) {
6468                 case AMDGPU_IRQ_STATE_DISABLE:
6469                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6470                         cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6471                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6472                         break;
6473                 case AMDGPU_IRQ_STATE_ENABLE:
6474                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6475                         cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6476                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6477                         break;
6478                 default:
6479                         break;
6480                 }
6481                 break;
6482
6483         case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
6484                 switch (state) {
6485                 case AMDGPU_IRQ_STATE_DISABLE:
6486                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6487                         cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6488                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6489                         break;
6490                 case AMDGPU_IRQ_STATE_ENABLE:
6491                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6492                         cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6493                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6494                         break;
6495                 default:
6496                         break;
6497                 }
6498                 break;
6499
6500         default:
6501                 break;
6502         }
6503         return 0;
6504 }
6505
6506 static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
6507                                     struct amdgpu_irq_src *source,
6508                                     struct amdgpu_iv_entry *entry)
6509 {
6510         bool queue_thermal = false;
6511
6512         if (entry == NULL)
6513                 return -EINVAL;
6514
6515         switch (entry->src_id) {
6516         case 230: /* thermal low to high */
6517                 DRM_DEBUG("IH: thermal low to high\n");
6518                 adev->pm.dpm.thermal.high_to_low = false;
6519                 queue_thermal = true;
6520                 break;
6521         case 231: /* thermal high to low */
6522                 DRM_DEBUG("IH: thermal high to low\n");
6523                 adev->pm.dpm.thermal.high_to_low = true;
6524                 queue_thermal = true;
6525                 break;
6526         default:
6527                 break;
6528         }
6529
6530         if (queue_thermal)
6531                 schedule_work(&adev->pm.dpm.thermal.work);
6532
6533         return 0;
6534 }
6535
6536 static int ci_dpm_set_clockgating_state(void *handle,
6537                                           enum amd_clockgating_state state)
6538 {
6539         return 0;
6540 }
6541
6542 static int ci_dpm_set_powergating_state(void *handle,
6543                                           enum amd_powergating_state state)
6544 {
6545         return 0;
6546 }
6547
6548 static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
6549                 enum pp_clock_type type, char *buf)
6550 {
6551         struct ci_power_info *pi = ci_get_pi(adev);
6552         struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
6553         struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
6554         struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
6555
6556         int i, now, size = 0;
6557         uint32_t clock, pcie_speed;
6558
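        /* clock levels are stored in 10 kHz units, hence the divide by 100 when printing MHz */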
6559         switch (type) {
6560         case PP_SCLK:
6561                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
6562                 clock = RREG32(mmSMC_MSG_ARG_0);
6563
6564                 for (i = 0; i < sclk_table->count; i++) {
6565                         if (clock > sclk_table->dpm_levels[i].value)
6566                                 continue;
6567                         break;
6568                 }
6569                 now = i;
6570
6571                 for (i = 0; i < sclk_table->count; i++)
6572                         size += sprintf(buf + size, "%d: %uMHz %s\n",
6573                                         i, sclk_table->dpm_levels[i].value / 100,
6574                                         (i == now) ? "*" : "");
6575                 break;
6576         case PP_MCLK:
6577                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
6578                 clock = RREG32(mmSMC_MSG_ARG_0);
6579
6580                 for (i = 0; i < mclk_table->count; i++) {
6581                         if (clock > mclk_table->dpm_levels[i].value)
6582                                 continue;
6583                         break;
6584                 }
6585                 now = i;
6586
6587                 for (i = 0; i < mclk_table->count; i++)
6588                         size += sprintf(buf + size, "%d: %uMHz %s\n",
6589                                         i, mclk_table->dpm_levels[i].value / 100,
6590                                         (i == now) ? "*" : "");
6591                 break;
6592         case PP_PCIE:
6593                 pcie_speed = ci_get_current_pcie_speed(adev);
6594                 for (i = 0; i < pcie_table->count; i++) {
6595                         if (pcie_speed != pcie_table->dpm_levels[i].value)
6596                                 continue;
6597                         break;
6598                 }
6599                 now = i;
6600
6601                 for (i = 0; i < pcie_table->count; i++)
6602                         size += sprintf(buf + size, "%d: %s %s\n", i,
6603                                         (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x1" :
6604                                         (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
6605                                         (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
6606                                         (i == now) ? "*" : "");
6607                 break;
6608         default:
6609                 break;
6610         }
6611
6612         return size;
6613 }
6614
6615 static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
6616                 enum pp_clock_type type, uint32_t mask)
6617 {
6618         struct ci_power_info *pi = ci_get_pi(adev);
6619
6620         if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
6621                                 AMD_DPM_FORCED_LEVEL_LOW |
6622                                 AMD_DPM_FORCED_LEVEL_HIGH))
6623                 return -EINVAL;
6624
6625         switch (type) {
6626         case PP_SCLK:
6627                 if (!pi->sclk_dpm_key_disabled)
6628                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6629                                         PPSMC_MSG_SCLKDPM_SetEnabledMask,
6630                                         pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
6631                 break;
6632
6633         case PP_MCLK:
6634                 if (!pi->mclk_dpm_key_disabled)
6635                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6636                                         PPSMC_MSG_MCLKDPM_SetEnabledMask,
6637                                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
6638                 break;
6639
6640         case PP_PCIE:
6641         {
6642                 uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
6643                 uint32_t level = 0;
6644
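                /* reduce the mask to the index of its highest set bit */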
6645                 while (tmp >>= 1)
6646                         level++;
6647
6648                 if (!pi->pcie_dpm_key_disabled)
6649                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6650                                         PPSMC_MSG_PCIeDPM_ForceLevel,
6651                                         level);
6652                 break;
6653         }
6654         default:
6655                 break;
6656         }
6657
6658         return 0;
6659 }
6660
6661 static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
6662 {
6663         struct ci_power_info *pi = ci_get_pi(adev);
6664         struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
6665         struct ci_single_dpm_table *golden_sclk_table =
6666                         &(pi->golden_dpm_table.sclk_table);
6667         int value;
6668
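        /* overdrive is the percentage by which the top sclk level exceeds the default (golden) top level */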
6669         value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
6670                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
6671                         100 /
6672                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6673
6674         return value;
6675 }
6676
6677 static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
6678 {
6679         struct ci_power_info *pi = ci_get_pi(adev);
6680         struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
6681         struct ci_single_dpm_table *golden_sclk_table =
6682                         &(pi->golden_dpm_table.sclk_table);
6683
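        /* overdrive is capped at 20% */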
6684         if (value > 20)
6685                 value = 20;
6686
6687         ps->performance_levels[ps->performance_level_count - 1].sclk =
6688                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
6689                         value / 100 +
6690                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6691
6692         return 0;
6693 }
6694
6695 static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
6696 {
6697         struct ci_power_info *pi = ci_get_pi(adev);
6698         struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
6699         struct ci_single_dpm_table *golden_mclk_table =
6700                         &(pi->golden_dpm_table.mclk_table);
6701         int value;
6702
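        /* same percentage calculation as ci_dpm_get_sclk_od, applied to the memory clock */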
6703         value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
6704                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
6705                         100 /
6706                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6707
6708         return value;
6709 }
6710
6711 static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
6712 {
6713         struct ci_power_info *pi = ci_get_pi(adev);
6714         struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
6715         struct ci_single_dpm_table *golden_mclk_table =
6716                         &(pi->golden_dpm_table.mclk_table);
6717
6718         if (value > 20)
6719                 value = 20;
6720
6721         ps->performance_levels[ps->performance_level_count - 1].mclk =
6722                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
6723                         value / 100 +
6724                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6725
6726         return 0;
6727 }
6728
6729 static int ci_dpm_get_power_profile_state(struct amdgpu_device *adev,
6730                 struct amd_pp_profile *query)
6731 {
6732         struct ci_power_info *pi = ci_get_pi(adev);
6733
6734         if (!pi || !query)
6735                 return -EINVAL;
6736
6737         if (query->type == AMD_PP_GFX_PROFILE)
6738                 memcpy(query, &pi->gfx_power_profile,
6739                                 sizeof(struct amd_pp_profile));
6740         else if (query->type == AMD_PP_COMPUTE_PROFILE)
6741                 memcpy(query, &pi->compute_power_profile,
6742                                 sizeof(struct amd_pp_profile));
6743         else
6744                 return -EINVAL;
6745
6746         return 0;
6747 }
6748
6749 static int ci_populate_requested_graphic_levels(struct amdgpu_device *adev,
6750                 struct amd_pp_profile *request)
6751 {
6752         struct ci_power_info *pi = ci_get_pi(adev);
6753         struct ci_dpm_table *dpm_table = &(pi->dpm_table);
6754         struct SMU7_Discrete_GraphicsLevel *levels =
6755                         pi->smc_state_table.GraphicsLevel;
6756         uint32_t array = pi->dpm_table_start +
6757                         offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
6758         uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
6759                         SMU7_MAX_LEVELS_GRAPHICS;
6760         uint32_t i;
6761
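        /* apply the profile's activity threshold and hysteresis to every sclk level, then write the table back into SMC SRAM */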
6762         for (i = 0; i < dpm_table->sclk_table.count; i++) {
6763                 levels[i].ActivityLevel =
6764                                 cpu_to_be16(request->activity_threshold);
6765                 levels[i].EnabledForActivity = 1;
6766                 levels[i].UpH = request->up_hyst;
6767                 levels[i].DownH = request->down_hyst;
6768         }
6769
6770         return amdgpu_ci_copy_bytes_to_smc(adev, array, (uint8_t *)levels,
6771                                 array_size, pi->sram_end);
6772 }
6773
6774 static void ci_find_min_clock_masks(struct amdgpu_device *adev,
6775                 uint32_t *sclk_mask, uint32_t *mclk_mask,
6776                 uint32_t min_sclk, uint32_t min_mclk)
6777 {
6778         struct ci_power_info *pi = ci_get_pi(adev);
6779         struct ci_dpm_table *dpm_table = &(pi->dpm_table);
6780         uint32_t i;
6781
6782         for (i = 0; i < dpm_table->sclk_table.count; i++) {
6783                 if (dpm_table->sclk_table.dpm_levels[i].enabled &&
6784                         dpm_table->sclk_table.dpm_levels[i].value >= min_sclk)
6785                         *sclk_mask |= 1 << i;
6786         }
6787
6788         for (i = 0; i < dpm_table->mclk_table.count; i++) {
6789                 if (dpm_table->mclk_table.dpm_levels[i].enabled &&
6790                         dpm_table->mclk_table.dpm_levels[i].value >= min_mclk)
6791                         *mclk_mask |= 1 << i;
6792         }
6793 }
6794
6795 static int ci_set_power_profile_state(struct amdgpu_device *adev,
6796                 struct amd_pp_profile *request)
6797 {
6798         struct ci_power_info *pi = ci_get_pi(adev);
6799         int tmp_result, result = 0;
6800         uint32_t sclk_mask = 0, mclk_mask = 0;
6801
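        /* freeze sclk/mclk DPM while the graphics level table is rewritten, then unfreeze */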
6802         tmp_result = ci_freeze_sclk_mclk_dpm(adev);
6803         if (tmp_result) {
6804                 DRM_ERROR("Failed to freeze SCLK MCLK DPM!");
6805                 result = tmp_result;
6806         }
6807
6808         tmp_result = ci_populate_requested_graphic_levels(adev,
6809                         request);
6810         if (tmp_result) {
6811                 DRM_ERROR("Failed to populate requested graphic levels!");
6812                 result = tmp_result;
6813         }
6814
6815         tmp_result = ci_unfreeze_sclk_mclk_dpm(adev);
6816         if (tmp_result) {
6817                 DRM_ERROR("Failed to unfreeze SCLK MCLK DPM!");
6818                 result = tmp_result;
6819         }
6820
6821         ci_find_min_clock_masks(adev, &sclk_mask, &mclk_mask,
6822                         request->min_sclk, request->min_mclk);
6823
6824         if (sclk_mask) {
6825                 if (!pi->sclk_dpm_key_disabled)
6826                         amdgpu_ci_send_msg_to_smc_with_parameter(
6827                                 adev,
6828                                 PPSMC_MSG_SCLKDPM_SetEnabledMask,
6829                                 pi->dpm_level_enable_mask.
6830                                 sclk_dpm_enable_mask &
6831                                 sclk_mask);
6832         }
6833
6834         if (mclk_mask) {
6835                 if (!pi->mclk_dpm_key_disabled)
6836                         amdgpu_ci_send_msg_to_smc_with_parameter(
6837                                 adev,
6838                                 PPSMC_MSG_MCLKDPM_SetEnabledMask,
6839                                 pi->dpm_level_enable_mask.
6840                                 mclk_dpm_enable_mask &
6841                                 mclk_mask);
6842         }
6843
6845         return result;
6846 }
6847
6848 static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
6849                 struct amd_pp_profile *request)
6850 {
6851         struct ci_power_info *pi = ci_get_pi(adev);
6852         int ret = -1;
6853
6854         if (!pi || !request)
6855                 return -EINVAL;
6856
6857         if (adev->pm.dpm.forced_level !=
6858                         AMD_DPM_FORCED_LEVEL_AUTO)
6859                 return -EINVAL;
6860
6861         if (request->min_sclk ||
6862                 request->min_mclk ||
6863                 request->activity_threshold ||
6864                 request->up_hyst ||
6865                 request->down_hyst) {
6866                 if (request->type == AMD_PP_GFX_PROFILE)
6867                         memcpy(&pi->gfx_power_profile, request,
6868                                         sizeof(struct amd_pp_profile));
6869                 else if (request->type == AMD_PP_COMPUTE_PROFILE)
6870                         memcpy(&pi->compute_power_profile, request,
6871                                         sizeof(struct amd_pp_profile));
6872                 else
6873                         return -EINVAL;
6874
6875                 if (request->type == pi->current_power_profile)
6876                         ret = ci_set_power_profile_state(
6877                                         adev,
6878                                         request);
6879         } else {
6880                 /* set power profile if it exists */
6881                 switch (request->type) {
6882                 case AMD_PP_GFX_PROFILE:
6883                         ret = ci_set_power_profile_state(
6884                                 adev,
6885                                 &pi->gfx_power_profile);
6886                         break;
6887                 case AMD_PP_COMPUTE_PROFILE:
6888                         ret = ci_set_power_profile_state(
6889                                 adev,
6890                                 &pi->compute_power_profile);
6891                         break;
6892                 default:
6893                         return -EINVAL;
6894                 }
6895         }
6896
6897         if (!ret)
6898                 pi->current_power_profile = request->type;
6899
6900         return 0;
6901 }
6902
6903 static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
6904                 struct amd_pp_profile *request)
6905 {
6906         struct ci_power_info *pi = ci_get_pi(adev);
6907
6908         if (!pi || !request)
6909                 return -EINVAL;
6910
6911         if (request->type == AMD_PP_GFX_PROFILE) {
6912                 pi->gfx_power_profile = pi->default_gfx_power_profile;
6913                 return ci_dpm_set_power_profile_state(adev,
6914                                           &pi->gfx_power_profile);
6915         } else if (request->type == AMD_PP_COMPUTE_PROFILE) {
6916                 pi->compute_power_profile =
6917                         pi->default_compute_power_profile;
6918                 return ci_dpm_set_power_profile_state(adev,
6919                                           &pi->compute_power_profile);
6920         } else
6921                 return -EINVAL;
6922 }
6923
6924 static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
6925                 enum amd_pp_profile_type type)
6926 {
6927         struct ci_power_info *pi = ci_get_pi(adev);
6928         struct amd_pp_profile request = {0};
6929
6930         if (!pi)
6931                 return -EINVAL;
6932
6933         if (pi->current_power_profile != type) {
6934                 request.type = type;
6935                 return ci_dpm_set_power_profile_state(adev, &request);
6936         }
6937
6938         return 0;
6939 }
6940
6941 static int ci_dpm_read_sensor(struct amdgpu_device *adev, int idx,
6942                               void *value, int *size)
6943 {
6944         u32 activity_percent = 50;
6945         int ret;
6946
6947         /* size must be at least 4 bytes for all sensors */
6948         if (*size < 4)
6949                 return -EINVAL;
6950
6951         switch (idx) {
6952         case AMDGPU_PP_SENSOR_GFX_SCLK:
6953                 *((uint32_t *)value) = ci_get_average_sclk_freq(adev);
6954                 *size = 4;
6955                 return 0;
6956         case AMDGPU_PP_SENSOR_GFX_MCLK:
6957                 *((uint32_t *)value) = ci_get_average_mclk_freq(adev);
6958                 *size = 4;
6959                 return 0;
6960         case AMDGPU_PP_SENSOR_GPU_TEMP:
6961                 *((uint32_t *)value) = ci_dpm_get_temp(adev);
6962                 *size = 4;
6963                 return 0;
6964         case AMDGPU_PP_SENSOR_GPU_LOAD:
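                /* AverageGraphicsA is an 8.8 fixed-point value: round it to a
                 * whole percentage and clamp to 100; 50 is reported if the
                 * SMC read fails */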
6965                 ret = ci_read_smc_soft_register(adev,
6966                                                 offsetof(SMU7_SoftRegisters,
6967                                                          AverageGraphicsA),
6968                                                 &activity_percent);
6969                 if (ret == 0) {
6970                         activity_percent += 0x80;
6971                         activity_percent >>= 8;
6972                         activity_percent =
6973                                 activity_percent > 100 ? 100 : activity_percent;
6974                 }
6975                 *((uint32_t *)value) = activity_percent;
6976                 *size = 4;
6977                 return 0;
6978         default:
6979                 return -EINVAL;
6980         }
6981 }
6982
6983 const struct amd_ip_funcs ci_dpm_ip_funcs = {
6984         .name = "ci_dpm",
6985         .early_init = ci_dpm_early_init,
6986         .late_init = ci_dpm_late_init,
6987         .sw_init = ci_dpm_sw_init,
6988         .sw_fini = ci_dpm_sw_fini,
6989         .hw_init = ci_dpm_hw_init,
6990         .hw_fini = ci_dpm_hw_fini,
6991         .suspend = ci_dpm_suspend,
6992         .resume = ci_dpm_resume,
6993         .is_idle = ci_dpm_is_idle,
6994         .wait_for_idle = ci_dpm_wait_for_idle,
6995         .soft_reset = ci_dpm_soft_reset,
6996         .set_clockgating_state = ci_dpm_set_clockgating_state,
6997         .set_powergating_state = ci_dpm_set_powergating_state,
6998 };
6999
7000 static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
7001         .get_temperature = &ci_dpm_get_temp,
7002         .pre_set_power_state = &ci_dpm_pre_set_power_state,
7003         .set_power_state = &ci_dpm_set_power_state,
7004         .post_set_power_state = &ci_dpm_post_set_power_state,
7005         .display_configuration_changed = &ci_dpm_display_configuration_changed,
7006         .get_sclk = &ci_dpm_get_sclk,
7007         .get_mclk = &ci_dpm_get_mclk,
7008         .print_power_state = &ci_dpm_print_power_state,
7009         .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
7010         .force_performance_level = &ci_dpm_force_performance_level,
7011         .vblank_too_short = &ci_dpm_vblank_too_short,
7012         .powergate_uvd = &ci_dpm_powergate_uvd,
7013         .set_fan_control_mode = &ci_dpm_set_fan_control_mode,
7014         .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
7015         .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
7016         .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
7017         .print_clock_levels = &ci_dpm_print_clock_levels,
7018         .force_clock_level = &ci_dpm_force_clock_level,
7019         .get_sclk_od = &ci_dpm_get_sclk_od,
7020         .set_sclk_od = &ci_dpm_set_sclk_od,
7021         .get_mclk_od = &ci_dpm_get_mclk_od,
7022         .set_mclk_od = &ci_dpm_set_mclk_od,
7023         .check_state_equal = &ci_check_state_equal,
7024         .get_vce_clock_state = &amdgpu_get_vce_clock_state,
7025         .get_power_profile_state = &ci_dpm_get_power_profile_state,
7026         .set_power_profile_state = &ci_dpm_set_power_profile_state,
7027         .reset_power_profile_state = &ci_dpm_reset_power_profile_state,
7028         .switch_power_profile = &ci_dpm_switch_power_profile,
7029         .read_sensor = &ci_dpm_read_sensor,
7030 };
7031
7032 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
7033 {
7034         if (adev->pm.funcs == NULL)
7035                 adev->pm.funcs = &ci_dpm_funcs;
7036 }
7037
7038 static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
7039         .set = ci_dpm_set_interrupt_state,
7040         .process = ci_dpm_process_interrupt,
7041 };
7042
7043 static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
7044 {
7045         adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
7046         adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
7047 }